/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/TensorAllocator.h"

#include "arm_compute/core/utils/misc/MMappedFile.h"
#include "arm_compute/core/utils/misc/Utility.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryRegion.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"

#include "support/MemorySupport.h"

#include "tests/Globals.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/validation/Validation.h"
#include "tests/validation/reference/ActivationLayer.h"

#include <fstream>
#include <memory>
#include <random>
namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
TEST_SUITE(TensorAllocator)

TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
{
    // Init tensor info
    TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);

    // Allocate memory buffer
    const size_t total_size = info.total_size();
    auto         data       = support::cpp14::make_unique<uint8_t[]>(total_size);

    // Negative case : Import nullptr
    Tensor t1;
    t1.allocator()->init(info);
    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(nullptr)), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Negative case : Import misaligned pointer
    Tensor       t2;
    const size_t required_alignment = 339;
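    // Note: 339 is a deliberately odd alignment; a pointer returned by
    // make_unique is effectively never aligned to it, so the import below is
    // expected to be rejected.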
    t2.allocator()->init(info, required_alignment);
    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Negative case : Import memory to a tensor that is memory managed
    Tensor      t3;
    MemoryGroup mg;
    t3.allocator()->set_associated_memory_group(&mg);
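    // A tensor whose lifetime is tracked by a memory group must not take over
    // external memory, so this import is expected to fail as well.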
    ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Positive case : Set raw pointer
    Tensor t4;
    t4.allocator()->init(info);
    ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
    t4.allocator()->free();
    ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);
}

TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
{
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
    const TensorShape         shape     = TensorShape(24U, 16U, 3U);
    const DataType            data_type = DataType::F32;

    // Create tensor
    const TensorInfo info(shape, 1, data_type);
    const size_t     required_alignment = 64;
    Tensor           tensor;
    tensor.allocator()->init(info, required_alignment);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, act_info);
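    // Passing nullptr as the output configures the activation to run in-place
    // on 'tensor'.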

    // Allocate and import tensor
    const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
    const size_t total_size_in_bytes = tensor.info()->total_size();
    size_t       space               = total_size_in_bytes + required_alignment;
    auto         raw_data            = support::cpp14::make_unique<uint8_t[]>(space);

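    // Over-allocating by the alignment guarantees that an aligned sub-range of
    // total_size_in_bytes fits inside the buffer; support::cpp11::align (a
    // backport of std::align) then bumps the pointer to the next aligned
    // address and shrinks 'space' by the bytes skipped.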
    void *aligned_ptr = raw_data.get();
    support::cpp11::align(required_alignment, total_size_in_bytes, aligned_ptr, space);

    ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(aligned_ptr)), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Fill tensor
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937                          gen(library->seed());
    auto                                 *typed_ptr = reinterpret_cast<float *>(aligned_ptr);
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        typed_ptr[i] = distribution(gen);
    }

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
    }

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
}

TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
{
    // Create tensor
    Tensor tensor;
    tensor.allocator()->init(TensorInfo(TensorShape(24U, 16U, 3U), 1, DataType::F32));

    // Enforce tensor padding and validate that meta-data were updated
    // Note: Padding might be updated after the function configuration in case of increased padding requirements
    const PaddingSize enforced_padding(3U, 5U, 2U, 4U);
    tensor.info()->extend_padding(enforced_padding);
    validate(tensor.info()->padding(), enforced_padding);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // Allocate and import tensor
    const size_t total_size_in_bytes = tensor.info()->total_size();
    auto         raw_data            = support::cpp14::make_unique<uint8_t[]>(total_size_in_bytes);

    ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(raw_data.get())), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Fill tensor while accounting for padding
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937                          gen(library->seed());

    Window tensor_window;
    tensor_window.use_tensor_dimensions(tensor.info()->tensor_shape());
    Iterator tensor_it(&tensor, tensor_window);
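    // The window covers only the tensor's valid region, so the iterator steps
    // over the padding bytes and every access below touches a real element.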

    execute_window_loop(tensor_window, [&](const Coordinates &)
    {
        *reinterpret_cast<float *>(tensor_it.ptr()) = distribution(gen);
    },
    tensor_it);

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    execute_window_loop(tensor_window, [&](const Coordinates &)
    {
        const float val = *reinterpret_cast<float *>(tensor_it.ptr());
        ARM_COMPUTE_EXPECT(val >= 0, framework::LogLevel::ERRORS);
    },
    tensor_it);

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
}

#if !defined(BARE_METAL)
TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
{
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
    const TensorShape         shape     = TensorShape(24U, 16U, 3U);
    const DataType            data_type = DataType::F32;

    // Create tensor
    const TensorInfo info(shape, 1, data_type);
    Tensor           tensor;
    tensor.allocator()->init(info);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, act_info);

    // Get number of elements
    const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
    const size_t total_size_in_bytes = tensor.info()->total_size();

    // Create a backing file of exactly total_size_in_bytes: seeking to the last
    // byte and writing a single byte extends the file without writing the whole payload
    std::ofstream output_file("test_mmap_import.bin", std::ios::binary | std::ios::out);
    output_file.seekp(total_size_in_bytes - 1);
    output_file.write("", 1);
    output_file.close();

    // Map file
    utils::mmap_io::MMappedFile mmapped_file("test_mmap_import.bin", 0 /** Whole file */, 0);
    ARM_COMPUTE_EXPECT(mmapped_file.is_mapped(), framework::LogLevel::ERRORS);
    unsigned char *data = mmapped_file.data();
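    // import_memory() uses this pointer directly, so the tensor reads and
    // writes straight through the file mapping with no staging copy.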

    // Import memory-mapped memory
    ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(data)), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Fill tensor
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937                          gen(library->seed());
    auto                                 *typed_ptr = reinterpret_cast<float *>(data);
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        typed_ptr[i] = distribution(gen);
    }

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
    }

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
}
#endif // !defined(BARE_METAL)

TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
{
    // Init tensor info
    TensorInfo   info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
    const size_t requested_alignment = 1024;

    Tensor t;
    t.allocator()->init(info, requested_alignment);
    t.allocator()->allocate();

    ARM_COMPUTE_EXPECT(t.buffer() != nullptr, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(t.allocator()->alignment() == requested_alignment, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(arm_compute::utility::check_aligned(reinterpret_cast<void *>(t.buffer()), requested_alignment),
                       framework::LogLevel::ERRORS);
}

TEST_SUITE_END()
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
} // namespace arm_compute