/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #include "arm_compute/runtime/TensorAllocator.h"
25 
26 #include "arm_compute/core/utils/misc/MMappedFile.h"
27 #include "arm_compute/core/utils/misc/Utility.h"
28 #include "arm_compute/runtime/MemoryGroup.h"
29 #include "arm_compute/runtime/MemoryRegion.h"
30 #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
31 
32 #include "tests/Globals.h"
33 #include "tests/Utils.h"
34 #include "tests/framework/Asserts.h"
35 #include "tests/framework/Macros.h"
36 #include "tests/validation/Validation.h"
37 #include "tests/validation/reference/ActivationLayer.h"
38 
39 #include <memory>
40 #include <random>
41 
namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
TEST_SUITE(TensorAllocator)

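// Checks TensorAllocator::import_memory() with raw pointers: importing nullptr, a misaligned pointer,
// or memory into a tensor managed by a MemoryGroup must fail and leave the tensor resizable, while a
// valid pointer is adopted in place (the tensor never takes ownership of the buffer).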
TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
{
    // Init tensor info
    TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);

    // Allocate memory buffer
    const size_t total_size = info.total_size();
    auto         data       = std::make_unique<uint8_t[]>(total_size);

    // Negative case : Import nullptr
    Tensor t1;
    t1.allocator()->init(info);
    ARM_COMPUTE_ASSERT(!bool(t1.allocator()->import_memory(nullptr)));
    ARM_COMPUTE_ASSERT(t1.info()->is_resizable());

    // Negative case : Import misaligned pointer
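    // (339 is deliberately not a power of two, so a heap pointer is practically guaranteed not to satisfy it)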
    Tensor       t2;
    const size_t required_alignment = 339;
    t2.allocator()->init(info, required_alignment);
    ARM_COMPUTE_ASSERT(!bool(t2.allocator()->import_memory(data.get())));
    ARM_COMPUTE_ASSERT(t2.info()->is_resizable());

    // Negative case : Import memory to a tensor that is memory managed
    Tensor      t3;
    MemoryGroup mg;
    t3.allocator()->set_associated_memory_group(&mg);
    ARM_COMPUTE_ASSERT(!bool(t3.allocator()->import_memory(data.get())));
    ARM_COMPUTE_ASSERT(t3.info()->is_resizable());

    // Positive case : Set raw pointer
    Tensor t4;
    t4.allocator()->init(info);
    ARM_COMPUTE_ASSERT(bool(t4.allocator()->import_memory(data.get())));
    ARM_COMPUTE_ASSERT(!t4.info()->is_resizable());
    ARM_COMPUTE_ASSERT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()));
    t4.allocator()->free();
    ARM_COMPUTE_ASSERT(t4.info()->is_resizable());
    ARM_COMPUTE_ASSERT(t4.buffer() == nullptr);
}

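// Runs an activation layer on memory imported from a heap allocation: the buffer is over-allocated by
// the requested alignment, and std::align locates a suitably aligned address inside it.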
TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
{
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
    const TensorShape         shape     = TensorShape(24U, 16U, 3U);
    const DataType            data_type = DataType::F32;

    // Create tensor
    const TensorInfo info(shape, 1, data_type);
    const size_t     required_alignment = 64;
    Tensor           tensor;
    tensor.allocator()->init(info, required_alignment);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, act_info);

    // Allocate and import tensor
    const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
    const size_t total_size_in_bytes = tensor.info()->total_size();
    size_t       space               = total_size_in_bytes + required_alignment;
    auto         raw_data            = std::make_unique<uint8_t[]>(space);

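    // std::align advances aligned_ptr to the next 64-byte boundary within the buffer and shrinks space accordingly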
    void *aligned_ptr = raw_data.get();
    std::align(required_alignment, total_size_in_bytes, aligned_ptr, space);

    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(aligned_ptr)));
    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

    // Fill tensor
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937                          gen(library->seed());
    auto                                 *typed_ptr = reinterpret_cast<float *>(aligned_ptr);
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        typed_ptr[i] = distribution(gen);
    }

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
    }

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}

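// Same as ImportMemoryMalloc, but padding is enforced on the tensor before import, so element
// accesses must go through a Window/Iterator pair that respects the padded strides.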
TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
{
    // Create tensor
    Tensor tensor;
    tensor.allocator()->init(TensorInfo(TensorShape(24U, 16U, 3U), 1, DataType::F32));

    // Enforce tensor padding and validate that the metadata was updated
    // Note: Padding might be updated after the function configuration in case of increased padding requirements
    const PaddingSize enforced_padding(3U, 5U, 2U, 4U);
    tensor.info()->extend_padding(enforced_padding);
    validate(tensor.info()->padding(), enforced_padding);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // Allocate and import tensor
    const size_t total_size_in_bytes = tensor.info()->total_size();
    auto         raw_data            = std::make_unique<uint8_t[]>(total_size_in_bytes);

    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(raw_data.get())));
    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

    // Fill tensor while accounting for padding
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937                          gen(library->seed());

    Window tensor_window;
    tensor_window.use_tensor_dimensions(tensor.info()->tensor_shape());
    Iterator tensor_it(&tensor, tensor_window);

    execute_window_loop(tensor_window, [&](const Coordinates &)
    {
        *reinterpret_cast<float *>(tensor_it.ptr()) = distribution(gen);
    },
    tensor_it);

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    execute_window_loop(tensor_window, [&](const Coordinates &)
    {
        const float val = *reinterpret_cast<float *>(tensor_it.ptr());
        ARM_COMPUTE_EXPECT(val >= 0, framework::LogLevel::ERRORS);
    },
    tensor_it);

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}

#if !defined(_WIN64) && !defined(BARE_METAL)
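// Runs an activation layer on memory imported from a memory-mapped file; guarded out on Windows and
// bare-metal targets, where the mapping facility is unavailable.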
TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
{
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
    const TensorShape         shape     = TensorShape(24U, 16U, 3U);
    const DataType            data_type = DataType::F32;

    // Create tensor
    const TensorInfo info(shape, 1, data_type);
    Tensor           tensor;
    tensor.allocator()->init(info);

    // Create and configure activation function
    NEActivationLayer act_func;
    act_func.configure(&tensor, nullptr, act_info);

    // Get number of elements
    const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
    const size_t total_size_in_bytes = tensor.info()->total_size();

    // Create file
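    // (seek to the last byte and write a single byte so the file is extended to total_size_in_bytes)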
    std::ofstream output_file("test_mmap_import.bin", std::ios::binary | std::ios::out);
    output_file.seekp(total_size_in_bytes - 1);
    output_file.write("", 1);
    output_file.close();

    // Map file
    utils::mmap_io::MMappedFile mmapped_file("test_mmap_import.bin", 0 /** Whole file */, 0);
    ARM_COMPUTE_ASSERT(mmapped_file.is_mapped());
    unsigned char *data = mmapped_file.data();

    // Import the memory-mapped buffer
    ARM_COMPUTE_ASSERT(bool(tensor.allocator()->import_memory(data)));
    ARM_COMPUTE_ASSERT(!tensor.info()->is_resizable());

    // Fill tensor
    std::uniform_real_distribution<float> distribution(-5.f, 5.f);
    std::mt19937                          gen(library->seed());
    auto                                 *typed_ptr = reinterpret_cast<float *>(data);
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        typed_ptr[i] = distribution(gen);
    }

    // Execute function and sync
    act_func.run();

    // Validate result by checking that the input has no negative values
    for(unsigned int i = 0; i < total_size_in_elems; ++i)
    {
        ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
    }

    // Release resources
    tensor.allocator()->free();
    ARM_COMPUTE_ASSERT(tensor.info()->is_resizable());
}
#endif // !defined(_WIN64) && !defined(BARE_METAL)

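// Checks that allocate() honours the alignment requested at init() time.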
TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
{
    // Init tensor info
    TensorInfo   info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
    const size_t requested_alignment = 1024;

    Tensor t;
    t.allocator()->init(info, requested_alignment);
    t.allocator()->allocate();

    ARM_COMPUTE_ASSERT(t.buffer() != nullptr);
    ARM_COMPUTE_EXPECT(t.allocator()->alignment() == requested_alignment, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(arm_compute::utility::check_aligned(reinterpret_cast<void *>(t.buffer()), requested_alignment),
                       framework::LogLevel::ERRORS);
}

TEST_SUITE_END() // TensorAllocator
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute