/*
 * Copyright 2021 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_

#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
#include "flatbuffers/detached_buffer.h"

namespace flatbuffers {

// This is a minimal replication of std::vector<uint8_t> functionality,
// except that it grows from higher to lower addresses, i.e. push_back()
// inserts data at the lowest address of the vector.
// Since this vector leaves the lower part of the buffer unused, we support a
// "scratch-pad" stored there for temporary data, sharing the allocated space.
// Essentially, this supports two std::vectors in a single buffer.
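//
// A minimal usage sketch (illustrative only; the constructor arguments below
// mirror how FlatBufferBuilder sets up its internal buffer, but are
// assumptions for the example rather than part of this header's contract):
//
//   vector_downward buf(1024, /*allocator=*/nullptr, /*own_allocator=*/false,
//                       /*buffer_minalign=*/AlignOf<largest_scalar_t>());
//   uint32_t x = 42;
//   buf.push_small(x);  // written at the high end; the buffer grows downward
//   buf.fill(4);        // zero padding, also growing toward lower addresses
//   DetachedBuffer done = buf.release();  // take ownership of the result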
class vector_downward {
 public:
  explicit vector_downward(size_t initial_size, Allocator *allocator,
                           bool own_allocator, size_t buffer_minalign)
      : allocator_(allocator),
        own_allocator_(own_allocator),
        initial_size_(initial_size),
        buffer_minalign_(buffer_minalign),
        reserved_(0),
        size_(0),
        buf_(nullptr),
        cur_(nullptr),
        scratch_(nullptr) {}

  vector_downward(vector_downward &&other)
      : allocator_(other.allocator_),
        own_allocator_(other.own_allocator_),
        initial_size_(other.initial_size_),
        buffer_minalign_(other.buffer_minalign_),
        reserved_(other.reserved_),
        size_(other.size_),
        buf_(other.buf_),
        cur_(other.cur_),
        scratch_(other.scratch_) {
    // No change in other.allocator_
    // No change in other.initial_size_
    // No change in other.buffer_minalign_
    other.own_allocator_ = false;
    other.reserved_ = 0;
    other.buf_ = nullptr;
    other.cur_ = nullptr;
    other.scratch_ = nullptr;
  }

  vector_downward &operator=(vector_downward &&other) {
    // Move construct a temporary and swap idiom
    vector_downward temp(std::move(other));
    swap(temp);
    return *this;
  }

  ~vector_downward() {
    clear_buffer();
    clear_allocator();
  }

  void reset() {
    clear_buffer();
    clear();
  }

  void clear() {
    if (buf_) {
      cur_ = buf_ + reserved_;
    } else {
      reserved_ = 0;
      cur_ = nullptr;
    }
    size_ = 0;
    clear_scratch();
  }

  void clear_scratch() { scratch_ = buf_; }

  void clear_allocator() {
    if (own_allocator_ && allocator_) { delete allocator_; }
    allocator_ = nullptr;
    own_allocator_ = false;
  }

  void clear_buffer() {
    if (buf_) Deallocate(allocator_, buf_, reserved_);
    buf_ = nullptr;
  }

  // Relinquish the buffer pointer to the caller.
  uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
    auto *buf = buf_;
    allocated_bytes = reserved_;
    offset = static_cast<size_t>(cur_ - buf_);

    // release_raw only relinquishes buffer ownership; it does not deallocate
    // or reset the allocator. The destructor will do that.
    buf_ = nullptr;
    clear();
    return buf;
  }

  // Relinquish the buffer to the caller, wrapped in a DetachedBuffer.
  DetachedBuffer release() {
    // Allocator ownership (if any) is transferred to the DetachedBuffer.
    DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_,
                      size());
    if (own_allocator_) {
      allocator_ = nullptr;
      own_allocator_ = false;
    }
    buf_ = nullptr;
    clear();
    return fb;
  }

  size_t ensure_space(size_t len) {
    FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
    if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
    // Beyond this, signed offsets may not have enough range:
    // (FlatBuffers > 2GB not supported).
    FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
    return len;
  }

  inline uint8_t *make_space(size_t len) {
    if (len) {
      ensure_space(len);
      cur_ -= len;
      size_ += static_cast<uoffset_t>(len);
    }
    return cur_;
  }

  // Returns nullptr if using the DefaultAllocator.
  Allocator *get_custom_allocator() { return allocator_; }

  inline uoffset_t size() const { return size_; }

  uoffset_t scratch_size() const {
    return static_cast<uoffset_t>(scratch_ - buf_);
  }

  size_t capacity() const { return reserved_; }

  uint8_t *data() const {
    FLATBUFFERS_ASSERT(cur_);
    return cur_;
  }

  uint8_t *scratch_data() const {
    FLATBUFFERS_ASSERT(buf_);
    return buf_;
  }

  uint8_t *scratch_end() const {
    FLATBUFFERS_ASSERT(scratch_);
    return scratch_;
  }

  uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }

  void push(const uint8_t *bytes, size_t num) {
    if (num > 0) { memcpy(make_space(num), bytes, num); }
  }

  // Specialized version of push() that avoids a memcpy call for small data.
  template<typename T> void push_small(const T &little_endian_t) {
    make_space(sizeof(T));
    *reinterpret_cast<T *>(cur_) = little_endian_t;
  }

  template<typename T> void scratch_push_small(const T &t) {
    ensure_space(sizeof(T));
    *reinterpret_cast<T *>(scratch_) = t;
    scratch_ += sizeof(T);
  }

  // fill() is most frequently called with small byte counts (<= 4), which is
  // why we use a loop here rather than calling memset.
  void fill(size_t zero_pad_bytes) {
    make_space(zero_pad_bytes);
    for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0;
  }

  // Version for when we know the size is larger.
  // Precondition: zero_pad_bytes > 0.
  void fill_big(size_t zero_pad_bytes) {
    memset(make_space(zero_pad_bytes), 0, zero_pad_bytes);
  }

  void pop(size_t bytes_to_remove) {
    cur_ += bytes_to_remove;
    size_ -= static_cast<uoffset_t>(bytes_to_remove);
  }

  void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }

  void swap(vector_downward &other) {
    using std::swap;
    swap(allocator_, other.allocator_);
    swap(own_allocator_, other.own_allocator_);
    swap(initial_size_, other.initial_size_);
    swap(buffer_minalign_, other.buffer_minalign_);
    swap(reserved_, other.reserved_);
    swap(size_, other.size_);
    swap(buf_, other.buf_);
    swap(cur_, other.cur_);
    swap(scratch_, other.scratch_);
  }

  void swap_allocator(vector_downward &other) {
    using std::swap;
    swap(allocator_, other.allocator_);
    swap(own_allocator_, other.own_allocator_);
  }

 private:
  // You shouldn't really be copying instances of this class.
  FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
  FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));

  Allocator *allocator_;
  bool own_allocator_;
  size_t initial_size_;
  size_t buffer_minalign_;
  size_t reserved_;
  uoffset_t size_;
  uint8_t *buf_;
  uint8_t *cur_;  // Points at location between empty (below) and used (above).
  uint8_t *scratch_;  // Points to the end of the scratchpad in use.
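
  // Rough picture of the layout implied by the pointers above (a sketch for
  // orientation only; region widths are not to scale):
  //
  //   buf_            scratch_                cur_        buf_ + reserved_
  //    |                 |                      |                 |
  //    v                 v                      v                 v
  //    +-----------------+----------------------+-----------------+
  //    |  scratch data   |      free space      | main data/size_ |
  //    +-----------------+----------------------+-----------------+
  //     <- grows upward                            grows downward ->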

  void reallocate(size_t len) {
    auto old_reserved = reserved_;
    auto old_size = size();
    auto old_scratch_size = scratch_size();
    // Grow by whichever is larger: the requested len, or half the current
    // capacity (initial_size_ on the first allocation); then round the new
    // capacity up to a multiple of buffer_minalign_.
    reserved_ +=
        (std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
    reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
    if (buf_) {
      buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_,
                                old_size, old_scratch_size);
    } else {
      buf_ = Allocate(allocator_, reserved_);
    }
    cur_ = buf_ + reserved_ - old_size;
    scratch_ = buf_ + old_scratch_size;
  }
};

}  // namespace flatbuffers

#endif  // FLATBUFFERS_VECTOR_DOWNWARD_H_