/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_TENSOR_LIBRARY_H
#define ARM_COMPUTE_TEST_TENSOR_LIBRARY_H

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"
#include "support/Random.h"
#include "tests/RawTensor.h"
#include "tests/TensorCache.h"
#include "tests/Utils.h"
#include "tests/framework/Exceptions.h"

#include <algorithm>
#include <cstddef>
#include <fstream>
#include <random>
#include <string>
#include <type_traits>
#include <vector>

namespace arm_compute
{
namespace test
{
/** Factory class to create and fill tensors.
 *
 * Allows tensors to be initialised from loaded images or by specifying the
 * shape explicitly. Furthermore, provides methods to fill tensors with the
 * content of loaded images or with random values.
 */
class AssetsLibrary final
{
public:
    using RangePair = std::pair<float, float>;

public:
    /** Initialises the library with a @p path to the assets directory.
     * Furthermore, sets the seed for the random generator to @p seed.
     *
     * @param[in] path Path to load assets from.
     * @param[in] seed Seed used to initialise the random number generator.
     */
    AssetsLibrary(std::string path, std::random_device::result_type seed);
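
    /* Example (illustrative sketch, not part of the original documentation):
     * a test harness would typically construct the library once, pointing it at
     * the assets directory and passing a fixed seed so that random fills are
     * reproducible across runs. The path below is a placeholder.
     *
     * @code
     * AssetsLibrary library("tests/assets", 42);
     * // path() and seed() return the values passed above.
     * @endcode
     */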

    /** Path to assets directory used to initialise library.
     *
     * @return the path to the assets directory.
     */
    std::string path() const;

    /** Seed that is used to fill tensors with random values.
     *
     * @return the initial random seed.
     */
    std::random_device::result_type seed() const;

    /** Provides a tensor shape for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     *
     * @return the tensor shape for the specified image.
     */
    TensorShape get_image_shape(const std::string &name);

    /** Provides a constant raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    const RawTensor &get(const std::string &name) const;

    /** Provides a raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    RawTensor get(const std::string &name);

    /** Creates an uninitialised raw tensor with the given @p data_type and @p
     * num_channels. The shape is derived from the specified image.
     *
     * @param[in] name         Image file used to initialise the tensor.
     * @param[in] data_type    Data type used to initialise the tensor.
     * @param[in] num_channels Number of channels used to initialise the tensor.
     *
     * @return a raw tensor for the specified image.
     */
    RawTensor get(const std::string &name, DataType data_type, int num_channels = 1) const;

    /** Provides a constant raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    const RawTensor &get(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image.
     */
    RawTensor get(const std::string &name, Format format);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     *
     * @return a raw tensor for the specified image channel.
     */
    const RawTensor &get(const std::string &name, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     *
     * @return a raw tensor for the specified image channel.
     */
    RawTensor get(const std::string &name, Channel channel);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image formatted to @p format.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image channel.
     */
    const RawTensor &get(const std::string &name, Format format, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image formatted to @p format.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @return a raw tensor for the specified image channel.
     */
    RawTensor get(const std::string &name, Format format, Channel channel);

    /** Puts garbage values in the padding around the tensor for testing purposes.
     *
     * @param[in, out] tensor       To be filled tensor.
     * @param[in]      distribution Distribution used to fill the tensor's surroundings.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T, typename D>
    void fill_borders_with_garbage(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] tensor       To be filled tensor.
     * @param[in]      distribution Distribution used to fill the tensor.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     *       will be used to draw samples.
     */
    template <typename T, typename D>
    void fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;
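
    /* Example (hedged sketch, not part of the original documentation): any
     * distribution object that exposes result_type and operator()(Generator &),
     * such as the standard library distributions, can be passed here. The
     * Accessor wrapper and the tensor/library variables are assumptions for
     * illustration only.
     *
     * @code
     * std::uniform_real_distribution<float> dist(-1.f, 1.f);
     * library.fill(Accessor(tensor), dist, 0); // seed_offset = 0
     * @endcode
     */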

    /** Fills the specified @p tensor with random bounding boxes drawn from @p
     * distribution. Every four consecutive elements are treated as one box
     * (left, top, right, bottom).
     *
     * @param[in, out] tensor       To be filled tensor.
     * @param[in]      distribution Distribution used to generate the box coordinates.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T, typename D>
    void fill_boxes(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p vec with random values drawn from @p
     * distribution.
     *
     * @param[in, out] vec          To be filled vector.
     * @param[in]      distribution Distribution used to fill the vector.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     *       will be used to draw samples.
     */
    template <typename T, typename D>
    void fill(std::vector<T> &vec, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p raw tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] raw          To be filled raw tensor.
     * @param[in]      distribution Distribution used to fill the tensor.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     *       will be used to draw samples.
     */
    template <typename D>
    void fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      name   Image file used to fill the tensor.
     * @param[in]      format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format) const;

    /** Fills the raw tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] raw    To be filled raw tensor.
     * @param[in]      name   Image file used to fill the tensor.
     * @param[in]      format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] tensor  To be filled tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] raw     To be filled raw tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Channel channel) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] tensor  To be filled tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      format  Format of the image used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] raw     To be filled raw tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      format  Format of the image used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format, Channel channel) const;
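
    /* Example (illustrative sketch, not part of the original documentation):
     * filling a single-plane tensor with the red channel of a packed RGB image
     * from the assets directory. The file name, Accessor wrapper and library
     * variable are placeholders/assumptions for illustration.
     *
     * @code
     * library.fill(Accessor(tensor), "example.ppm", Format::RGB888, Channel::R);
     * @endcode
     */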

    /** Fills the specified @p tensor with the content of the raw tensor.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      raw    Raw tensor used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, RawTensor raw) const;

    /** Fill a tensor with uniform distribution
     *
     * @param[in, out] tensor      To be filled tensor.
     * @param[in]      seed_offset The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const;

    /** Fill a tensor with uniform distribution
     *
     * @param[in, out] tensor      To be filled tensor.
     * @param[in]      seed_offset The offset will be added to the global seed before initialising the random generator.
     * @param[in]      low         lowest value in the range (inclusive)
     * @param[in]      high        highest value in the range (inclusive)
     *
     * @note @p low and @p high must be of the same type as the data type of @p tensor
     */
    template <typename T, typename D>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const;
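
    /* Example (hedged sketch, not part of the original documentation): for an
     * S16 tensor, @p low and @p high must be int16_t values; passing another
     * type trips the type check in the implementation below. The Accessor
     * wrapper and library variable are assumptions for illustration.
     *
     * @code
     * library.fill_tensor_uniform(Accessor(tensor), 0, int16_t(-100), int16_t(100));
     * @endcode
     */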

    /** Fill a tensor with uniform distribution across the specified range
     *
     * @param[in, out] tensor               To be filled tensor.
     * @param[in]      seed_offset          The offset will be added to the global seed before initialising the random generator.
     * @param[in]      excluded_range_pairs Ranges to exclude from the generator
     */
    template <typename T>
    void fill_tensor_uniform_ranged(T &&tensor,
                                    std::random_device::result_type seed_offset,
                                    const std::vector<AssetsLibrary::RangePair> &excluded_range_pairs) const;
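
    /* Example (illustrative sketch, not part of the original documentation):
     * drawing uniform F32 values while excluding a small band around zero,
     * e.g. to avoid near-zero divisors in a subsequent operator. The Accessor
     * wrapper and library variable are assumptions for illustration.
     *
     * @code
     * const std::vector<AssetsLibrary::RangePair> excluded{ { -0.001f, 0.001f } };
     * library.fill_tensor_uniform_ranged(Accessor(tensor), 0, excluded);
     * @endcode
     */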

    /** Fills the specified @p tensor with data loaded from a .npy (numpy binary) file at the specified path.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      name   Data file.
     *
     * @note The numpy array stored in the binary .npy file must be row-major in the sense that it
     *       must store elements within a row consecutively in memory, then rows within a 2D slice,
     *       then 2D slices within a 3D slice and so on. Note that this imposes no restriction on the
     *       indexing convention used in the numpy array; it can be either Fortran style or C style
     *       as long as it adheres to the rule above.
     *
     *       More concretely, the orders of dimensions for each style are as follows:
     *       C-style (numpy default):
     *           array[HigherDims..., Z, Y, X]
     *       Fortran style:
     *           array[X, Y, Z, HigherDims...]
     */
    template <typename T>
    void fill_layer_data(T &&tensor, std::string name) const;
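
    /* Example (hedged sketch, not part of the original documentation): loading
     * pretrained weights stored as a row-major .npy file, resolved relative to
     * the assets directory. The file name, Accessor wrapper and library
     * variable are placeholders for illustration.
     *
     * @code
     * library.fill_layer_data(Accessor(weights), "weights/conv1_w.npy");
     * @endcode
     */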

    /** Fill a tensor with a constant value
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      value  Value to be assigned to all elements of the input tensor.
     *
     * @note @p value must be of the same type as the data type of @p tensor
     */
    template <typename T, typename D>
    void fill_tensor_value(T &&tensor, D value) const;

    /** Fill a tensor with a given vector of static values.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      values A vector containing values
     *
     * To cope with tensors of various sizes, the vector doesn't have to have
     * the same number of elements as the tensor. If the tensor is larger than
     * the vector, the iterator over the vector wraps around and starts from
     * the beginning again. If the vector is larger, the values beyond the
     * tensor's size are not used.
     */
    template <typename T, typename DataType>
    void fill_static_values(T &&tensor, const std::vector<DataType> &values) const;
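
    /* Example (illustrative sketch, not part of the original documentation):
     * filling a 6-element S32 tensor from a 4-element vector yields
     * 1, 2, 3, 4, 1, 2 because the iterator wraps around. The Accessor wrapper
     * and library variable are assumptions for illustration.
     *
     * @code
     * const std::vector<int32_t> values{ 1, 2, 3, 4 };
     * library.fill_static_values(Accessor(tensor), values);
     * @endcode
     */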

private:
    // Function prototype to convert between image formats.
    using Converter = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to extract a channel from an image.
    using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to load an image file.
    using Loader = RawTensor (*)(const std::string &path);
    // Function type to generate a number to fill tensors.
    template <typename ResultType>
    using GeneratorFunctionType = std::function<ResultType(void)>;

    const Converter &get_converter(Format src, Format dst) const;
    const Converter &get_converter(DataType src, Format dst) const;
    const Converter &get_converter(Format src, DataType dst) const;
    const Converter &get_converter(DataType src, DataType dst) const;
    const Extractor &get_extractor(Format format, Channel) const;
    const Loader    &get_loader(const std::string &extension) const;

    /** Creates a raw tensor from the specified image.
     *
     * @param[in] name To be loaded image file.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     *       is loaded instead.
     */
    RawTensor load_image(const std::string &name) const;

    /** Provides a raw tensor for the specified image and format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * If the tensor has already been requested before, the cached version will
     * be returned. Otherwise the tensor will be added to the cache.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     *       is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image, format and channel.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * If the tensor has already been requested before, the cached version will
     * be returned. Otherwise the tensor will be added to the cache.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     *       is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;

    /** Fill a tensor with a value generator function.
     *
     * @param[in, out] tensor         To be filled tensor.
     * @param[in]      generate_value A function that generates values.
     */
    template <typename T, typename ResultType>
    void fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const;

    mutable TensorCache             _cache{};
    mutable arm_compute::Mutex      _format_lock{};
    mutable arm_compute::Mutex      _channel_lock{};
    const std::string               _library_path;
    std::random_device::result_type _seed;
};

namespace detail
{
template <typename T>
inline std::vector<std::pair<T, T>> convert_range_pair(const std::vector<AssetsLibrary::RangePair> &excluded_range_pairs)
{
    std::vector<std::pair<T, T>> converted;
    std::transform(excluded_range_pairs.begin(),
                   excluded_range_pairs.end(),
                   std::back_inserter(converted),
                   [](const AssetsLibrary::RangePair &p)
                   {
                       return std::pair<T, T>(static_cast<T>(p.first), static_cast<T>(p.second));
                   });
    return converted;
}

/* Read npy header and check the payload is suitable for the specified type and shape
 *
 * @param[in] stream         ifstream of the npy file
 * @param[in] expect_typestr Expected typestr
 * @param[in] expect_shape   Shape of tensor expected to receive the data
 *
 * @note Advances stream to the beginning of the data payload
 */
void validate_npy_header(std::ifstream &stream, const std::string &expect_typestr, const TensorShape &expect_shape);
} // namespace detail

template <typename T, typename D>
void AssetsLibrary::fill_borders_with_garbage(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    const PaddingSize padding_size = tensor.padding();

    Window window;
    window.set(0, Window::Dimension(-padding_size.left, tensor.shape()[0] + padding_size.right, 1));
    if(tensor.shape().num_dimensions() > 1)
    {
        window.set(1, Window::Dimension(-padding_size.top, tensor.shape()[1] + padding_size.bottom, 1));
    }

    std::mt19937 gen(_seed + seed_offset);

    execute_window_loop(window, [&](const Coordinates & id)
    {
        TensorShape shape = tensor.shape();

        // If outside of valid region
        if(id.x() < 0 || id.x() >= static_cast<int>(shape.x()) || id.y() < 0 || id.y() >= static_cast<int>(shape.y()))
        {
            using ResultType         = typename std::remove_reference<D>::type::result_type;
            const ResultType value   = distribution(gen);
            void *const      out_ptr = tensor(id);
            store_value_with_data_type(out_ptr, value, tensor.data_type());
        }
    });
}

template <typename T, typename D>
void AssetsLibrary::fill_boxes(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    using ResultType = typename std::remove_reference<D>::type::result_type;
    std::mt19937   gen(_seed + seed_offset);
    TensorShape    shape(tensor.shape());
    const uint32_t num_boxes = tensor.num_elements() / 4;
    // Iterate over all elements
    std::uniform_real_distribution<> size_dist(0.f, 1.f);
    for(uint32_t element_idx = 0; element_idx < num_boxes * 4; element_idx += 4)
    {
        const ResultType delta   = size_dist(gen);
        const ResultType epsilon = size_dist(gen);
        const ResultType left    = distribution(gen);
        const ResultType top     = distribution(gen);
        const ResultType right   = left + delta;
        const ResultType bottom  = top + epsilon;
        const std::tuple<ResultType, ResultType, ResultType, ResultType> box(left, top, right, bottom);
        Coordinates x1 = index2coord(shape, element_idx);
        Coordinates y1 = index2coord(shape, element_idx + 1);
        Coordinates x2 = index2coord(shape, element_idx + 2);
        Coordinates y2 = index2coord(shape, element_idx + 3);
        ResultType &target_value_x1 = reinterpret_cast<ResultType *>(tensor(x1))[0];
        ResultType &target_value_y1 = reinterpret_cast<ResultType *>(tensor(y1))[0];
        ResultType &target_value_x2 = reinterpret_cast<ResultType *>(tensor(x2))[0];
        ResultType &target_value_y2 = reinterpret_cast<ResultType *>(tensor(y2))[0];
        store_value_with_data_type(&target_value_x1, std::get<0>(box), tensor.data_type());
        store_value_with_data_type(&target_value_y1, std::get<1>(box), tensor.data_type());
        store_value_with_data_type(&target_value_x2, std::get<2>(box), tensor.data_type());
        store_value_with_data_type(&target_value_y2, std::get<3>(box), tensor.data_type());
    }
    fill_borders_with_garbage(tensor, distribution, seed_offset);
}

template <typename T, typename D>
void AssetsLibrary::fill(std::vector<T> &vec, D &&distribution, std::random_device::result_type seed_offset) const
{
    ARM_COMPUTE_ERROR_ON_MSG(vec.empty(), "Vector must not be empty");

    using ResultType = typename std::remove_reference<D>::type::result_type;

    std::mt19937 gen(_seed + seed_offset);
    for(size_t i = 0; i < vec.size(); ++i)
    {
        const ResultType value = distribution(gen);

        vec[i] = value;
    }
}

template <typename T, typename ResultType>
void AssetsLibrary::fill_with_generator(T &&tensor, const GeneratorFunctionType<ResultType> &generate_value) const
{
    const bool  is_nhwc = tensor.data_layout() == DataLayout::NHWC;
    TensorShape shape(tensor.shape());

    if(is_nhwc)
    {
        // Ensure that the equivalent tensors will be filled for both data layouts
        permute(shape, PermutationVector(1U, 2U, 0U));
    }

    // Iterate over all elements
    const uint32_t num_elements = tensor.num_elements();
    for(uint32_t element_idx = 0; element_idx < num_elements; ++element_idx)
    {
        Coordinates id = index2coord(shape, element_idx);

        if(is_nhwc)
        {
            // Write in the correct id for permuted shapes
            permute(id, PermutationVector(2U, 0U, 1U));
        }

        // Iterate over all channels
        for(int channel = 0; channel < tensor.num_channels(); ++channel)
        {
            const ResultType value        = generate_value();
            ResultType      &target_value = reinterpret_cast<ResultType *>(tensor(id))[channel];

            store_value_with_data_type(&target_value, value, tensor.data_type());
        }
    }
}

template <typename T, typename D>
void AssetsLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    using ResultType = typename std::remove_reference<D>::type::result_type;
    std::mt19937 gen(_seed + seed_offset);

    GeneratorFunctionType<ResultType> number_generator = [&]()
    {
        const ResultType value = distribution(gen);
        return value;
    };

    fill_with_generator(tensor, number_generator);
    fill_borders_with_garbage(tensor, distribution, seed_offset);
}

template <typename T, typename DataType>
void AssetsLibrary::fill_static_values(T &&tensor, const std::vector<DataType> &values) const
{
    auto                            it             = values.begin();
    GeneratorFunctionType<DataType> get_next_value = [&]()
    {
        const DataType value = *it;
        ++it;

        if(it == values.end())
        {
            it = values.begin();
        }

        return value;
    };

    fill_with_generator(tensor, get_next_value);
}

template <typename D>
void AssetsLibrary::fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const
{
    std::mt19937 gen(_seed + seed_offset);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        using ResultType       = typename std::remove_reference<D>::type::result_type;
        const ResultType value = distribution(gen);

        store_value_with_data_type(raw.data() + offset, value, raw.data_type());
    }
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, const std::string &name, Format format) const
{
    const RawTensor &raw = get(name, format);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::value_type *const raw_ptr = raw.data() + offset;
        const auto                         out_ptr = static_cast<RawTensor::value_type *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, const std::string &name, Channel channel) const
{
    fill(std::forward<T>(tensor), name, get_format_for_channel(channel), channel);
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, const std::string &name, Format format, Channel channel) const
{
    const RawTensor &raw = get(name, format, channel);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::value_type *const raw_ptr = raw.data() + offset;
        const auto                         out_ptr = static_cast<RawTensor::value_type *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void AssetsLibrary::fill(T &&tensor, RawTensor raw) const
{
    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::value_type *const raw_ptr = raw.data() + offset;
        const auto                         out_ptr = static_cast<RawTensor::value_type *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        case DataType::QASYMM8:
        {
            std::uniform_int_distribution<uint8_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max());
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QSYMM8_PER_CHANNEL:
        case DataType::QASYMM8_SIGNED:
        {
            std::uniform_int_distribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(std::numeric_limits<uint16_t>::lowest(), std::numeric_limits<uint16_t>::max());
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        case DataType::QSYMM16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(), std::numeric_limits<int16_t>::max());
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(std::numeric_limits<uint32_t>::lowest(), std::numeric_limits<uint32_t>::max());
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(std::numeric_limits<uint64_t>::lowest(), std::numeric_limits<uint64_t>::max());
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max());
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
        case DataType::BFLOAT16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<float> distribution_bf16(-1000.f, 1000.f);
            fill(tensor, distribution_bf16, seed_offset);
            break;
        }
        case DataType::F16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<float> distribution_f16(-100.f, 100.f);
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
        case DataType::F32:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<float> distribution_f32(-1000.f, 1000.f);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<double> distribution_f64(-1000.f, 1000.f);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            std::uniform_int_distribution<size_t> distribution_sizet(std::numeric_limits<size_t>::lowest(), std::numeric_limits<size_t>::max());
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void AssetsLibrary::fill_tensor_uniform_ranged(T &&tensor,
                                               std::random_device::result_type seed_offset,
                                               const std::vector<AssetsLibrary::RangePair> &excluded_range_pairs) const
{
    using namespace arm_compute::utils::random;

    switch(tensor.data_type())
    {
        case DataType::U8:
        case DataType::QASYMM8:
        {
            const auto converted_pairs = detail::convert_range_pair<uint8_t>(excluded_range_pairs);
            RangedUniformDistribution<uint8_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(),
                                                               std::numeric_limits<uint8_t>::max(),
                                                               converted_pairs);
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QSYMM8:
        {
            const auto converted_pairs = detail::convert_range_pair<int8_t>(excluded_range_pairs);
            RangedUniformDistribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(),
                                                              std::numeric_limits<int8_t>::max(),
                                                              converted_pairs);
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            const auto converted_pairs = detail::convert_range_pair<uint16_t>(excluded_range_pairs);
            RangedUniformDistribution<uint16_t> distribution_u16(std::numeric_limits<uint16_t>::lowest(),
                                                                 std::numeric_limits<uint16_t>::max(),
                                                                 converted_pairs);
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        case DataType::QSYMM16:
        {
            const auto converted_pairs = detail::convert_range_pair<int16_t>(excluded_range_pairs);
            RangedUniformDistribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(),
                                                                std::numeric_limits<int16_t>::max(),
                                                                converted_pairs);
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            const auto converted_pairs = detail::convert_range_pair<uint32_t>(excluded_range_pairs);
            RangedUniformDistribution<uint32_t> distribution_u32(std::numeric_limits<uint32_t>::lowest(),
                                                                 std::numeric_limits<uint32_t>::max(),
                                                                 converted_pairs);
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            const auto converted_pairs = detail::convert_range_pair<int32_t>(excluded_range_pairs);
            RangedUniformDistribution<int32_t> distribution_s32(std::numeric_limits<int32_t>::lowest(),
                                                                std::numeric_limits<int32_t>::max(),
                                                                converted_pairs);
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::BFLOAT16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            const auto converted_pairs = detail::convert_range_pair<float>(excluded_range_pairs);
            RangedUniformDistribution<float> distribution_bf16(-1000.f, 1000.f, converted_pairs);
            fill(tensor, distribution_bf16, seed_offset);
            break;
        }
        case DataType::F16:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            const auto converted_pairs = detail::convert_range_pair<float>(excluded_range_pairs);
            RangedUniformDistribution<float> distribution_f16(-100.f, 100.f, converted_pairs);
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
        case DataType::F32:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            const auto converted_pairs = detail::convert_range_pair<float>(excluded_range_pairs);
            RangedUniformDistribution<float> distribution_f32(-1000.f, 1000.f, converted_pairs);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T, typename D>
void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        case DataType::QASYMM8:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint8_t, D>::value));
            std::uniform_int_distribution<uint8_t> distribution_u8(low, high);
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QASYMM8_SIGNED:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
            std::uniform_int_distribution<int8_t> distribution_s8(low, high);
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint16_t, D>::value));
            std::uniform_int_distribution<uint16_t> distribution_u16(low, high);
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        case DataType::QSYMM16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int16_t, D>::value));
            std::uniform_int_distribution<int16_t> distribution_s16(low, high);
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint32_t, D>::value));
            std::uniform_int_distribution<uint32_t> distribution_u32(low, high);
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int32_t, D>::value));
            std::uniform_int_distribution<int32_t> distribution_s32(low, high);
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint64_t, D>::value));
            std::uniform_int_distribution<uint64_t> distribution_u64(low, high);
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int64_t, D>::value));
            std::uniform_int_distribution<int64_t> distribution_s64(low, high);
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
        case DataType::BFLOAT16:
        {
            std::uniform_real_distribution<float> distribution_bf16(low, high);
            fill(tensor, distribution_bf16, seed_offset);
            break;
        }
        case DataType::F16:
        {
            std::uniform_real_distribution<float> distribution_f16(low, high);
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
        case DataType::F32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<float, D>::value));
            std::uniform_real_distribution<float> distribution_f32(low, high);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<double, D>::value));
            std::uniform_real_distribution<double> distribution_f64(low, high);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<size_t, D>::value));
            std::uniform_int_distribution<size_t> distribution_sizet(low, high);
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void AssetsLibrary::fill_layer_data(T &&tensor, std::string name) const
{
#ifdef _WIN32
    const std::string path_separator("\\");
#else /* _WIN32 */
    const std::string path_separator("/");
#endif /* _WIN32 */
    const std::string path = _library_path + path_separator + name;

    // Open file
    std::ifstream stream(path, std::ios::in | std::ios::binary);
    if(!stream.good())
    {
        throw framework::FileNotFound("Could not load npy file: " + path);
    }

    validate_npy_header(stream, tensor.data_type(), tensor.shape());

    // Read data
    if(tensor.padding().empty())
    {
        // If the tensor has no padding, read directly from the stream.
        stream.read(reinterpret_cast<char *>(tensor.data()), tensor.size());
    }
    else
    {
        // If the tensor has padding, access the tensor elements through an execution window.
        Window window;
        window.use_tensor_dimensions(tensor.shape());

        execute_window_loop(window, [&](const Coordinates & id)
        {
            stream.read(reinterpret_cast<char *>(tensor(id)), tensor.element_size());
        });
    }
}

template <typename T, typename D>
void AssetsLibrary::fill_tensor_value(T &&tensor, D value) const
{
    fill_tensor_uniform(tensor, 0, value, value);
}
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_TENSOR_LIBRARY_H */