/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/TensorAllocator.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/Utility.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryRegion.h"
#include "support/MemorySupport.h"

#include <cstddef>

using namespace arm_compute;

namespace
{
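// Checks that the sub-tensor described by child_info, anchored at coords, lies entirely within the parent tensor.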
bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &child_info, const Coordinates &coords)
{
    bool               is_valid     = true;
    const TensorShape &parent_shape = parent_info.tensor_shape();
    const TensorShape &child_shape  = child_info.tensor_shape();
    const size_t       parent_dims  = parent_info.num_dimensions();
    const size_t       child_dims   = child_info.num_dimensions();

    if(child_dims <= parent_dims)
    {
        for(size_t num_dimensions = child_dims; num_dimensions > 0; --num_dimensions)
        {
            const size_t child_dim_size = coords[num_dimensions - 1] + child_shape[num_dimensions - 1];

            if((coords[num_dimensions - 1] < 0) || (child_dim_size > parent_shape[num_dimensions - 1]))
            {
                is_valid = false;
                break;
            }
        }
    }
    else
    {
        is_valid = false;
    }

    return is_valid;
}
} // namespace

TensorAllocator::TensorAllocator(IMemoryManageable *owner)
    : _owner(owner), _associated_memory_group(nullptr), _memory()
{
}

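// Mark the tensor info as resizable again when the allocator is destroyed.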
TensorAllocator::~TensorAllocator()
{
    info().set_is_resizable(true);
}

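// Move constructor: takes over the other allocator's owner, memory group and memory, leaving it in an empty state.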
TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
    : ITensorAllocator(std::move(o)),
      _owner(o._owner),
      _associated_memory_group(o._associated_memory_group),
      _memory(std::move(o._memory))
{
    o._owner                   = nullptr;
    o._associated_memory_group = nullptr;
    o._memory                  = Memory();
}

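// Move assignment: transfers the other allocator's owner, memory group and memory, leaving it in an empty state.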
TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
{
    if(&o != this)
    {
        _owner   = o._owner;
        o._owner = nullptr;

        _associated_memory_group   = o._associated_memory_group;
        o._associated_memory_group = nullptr;

        _memory   = std::move(o._memory);
        o._memory = Memory();

        ITensorAllocator::operator=(std::move(o));
    }
    return *this;
}

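// Initialises the allocator as a sub-tensor of the given parent allocator: the sub-tensor shares the parent's
// buffer, and sub_info is re-initialised with the parent's strides and the byte offset of coords.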
void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
{
    // Get parent info
    const TensorInfo parent_info = allocator.info();

    // Check if coordinates and new shape are within the parent tensor
    ARM_COMPUTE_ERROR_ON(!validate_subtensor_shape(parent_info, sub_info, coords));
    ARM_COMPUTE_UNUSED(validate_subtensor_shape);

    // Copy pointer to buffer
    _memory = Memory(allocator._memory.region());

    // Init tensor info with new dimensions
    size_t total_size = parent_info.offset_element_in_bytes(coords) + sub_info.total_size() - sub_info.offset_first_element_in_bytes();
    sub_info.init(sub_info.tensor_shape(), sub_info.format(), parent_info.strides_in_bytes(), parent_info.offset_element_in_bytes(coords), total_size);

    // Set TensorInfo
    init(sub_info);
}

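// Returns a pointer to the underlying buffer, or nullptr if no memory region has been set.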
uint8_t *TensorAllocator::data() const
{
    return (_memory.region() == nullptr) ? nullptr : reinterpret_cast<uint8_t *>(_memory.region()->buffer());
}

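// Allocates backing memory for the tensor: an owned region when no memory group is associated,
// otherwise the associated memory group finalises the memory.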
void TensorAllocator::allocate()
{
    // Align to 64-byte boundaries by default if alignment is not specified
    const size_t alignment_to_use = (alignment() != 0) ? alignment() : 64;
    if(_associated_memory_group == nullptr)
    {
        _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(info().total_size(), alignment_to_use));
    }
    else
    {
        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment_to_use);
    }
    info().set_is_resizable(false);
}

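// Releases the backing memory region and marks the tensor as resizable again.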
void TensorAllocator::free()
{
    _memory.set_region(nullptr);
    info().set_is_resizable(true);
}

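// Wraps externally allocated memory: fails if the pointer is null, a memory group is already associated,
// or the pointer does not satisfy the requested alignment.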
Status TensorAllocator::import_memory(void *memory)
{
    ARM_COMPUTE_RETURN_ERROR_ON(memory == nullptr);
    ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
    ARM_COMPUTE_RETURN_ERROR_ON(alignment() != 0 && !arm_compute::utility::check_aligned(memory, alignment()));

    _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(memory, info().total_size()));
    info().set_is_resizable(false);

    return Status{};
}

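// Associates the tensor with a memory group; only valid before any backing memory has been allocated.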
void TensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
{
    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
    ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.region()->buffer() != nullptr);

    _associated_memory_group = associated_memory_group;
}

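// Returns a pointer to the mapped buffer; a memory region must have been set.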
uint8_t *TensorAllocator::lock()
{
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
}

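// No-op: CPU-backed memory does not need to be unmapped.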
void TensorAllocator::unlock()
{
}