/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern crate smallvec;

use std::cmp::max;
use std::marker::PhantomData;
use std::ptr::write_bytes;
use std::slice::from_raw_parts;

use endian_scalar::{read_scalar, emplace_scalar};
use primitives::*;
use push::{Push, PushAlignment};
use table::Table;
use vtable::{VTable, field_index_to_field_offset};
use vtable_writer::VTableWriter;
use vector::{SafeSliceAccess, Vector};

pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;

#[derive(Clone, Copy, Debug)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

/// FlatBufferBuilder builds a FlatBuffer by manipulating its internal
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
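///
/// # Example
///
/// A minimal usage sketch (illustrative only; the slot offset `4` is a
/// hypothetical vtable offset that generated code would normally supply):
///
/// ```ignore
/// let mut builder = FlatBufferBuilder::new_with_capacity(1024);
/// let name = builder.create_string("example");
/// let start = builder.start_table();
/// builder.push_slot_always(4, name);
/// let root = builder.end_table(start);
/// builder.finish(root, None);
/// let bytes: &[u8] = builder.finished_data();
/// ```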
pub struct FlatBufferBuilder<'fbb> {
    owned_buf: Vec<u8>,
    head: usize,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::new_with_capacity(0)
    }

    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
    pub fn new_with_capacity(size: usize) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_owned_buf:
        assert!(size <= FLATBUFFERS_MAX_BUFFER_SIZE,
                "cannot initialize buffer bigger than 2 gigabytes");

        FlatBufferBuilder {
            owned_buf: vec![0u8; size],
            head: size,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,

            _phantom: PhantomData,
        }
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
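    ///
    /// # Example
    ///
    /// A minimal re-use sketch (illustrative only): the same builder produces
    /// two buffers without re-allocating its internal `Vec<u8>`.
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// for _ in 0..2 {
    ///     let start = builder.start_table();
    ///     let root = builder.end_table(start);
    ///     builder.finish_minimal(root);
    ///     let _bytes = builder.finished_data().to_vec();
    ///     builder.reset(); // clears `finished` and keeps the allocation
    /// }
    /// ```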
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
            unsafe { write_bytes(ptr, 0, to_clear); }
        }

        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op.
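    ///
    /// # Example
    ///
    /// An illustrative sketch (must be called between `start_table` and
    /// `end_table`; the slot offsets `4` and `6` are hypothetical vtable
    /// offsets that generated code would normally supply):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let start = builder.start_table();
    /// builder.push_slot(4, 0u8, 0u8); // equals the default: nothing is written
    /// builder.push_slot(6, 1u8, 0u8); // differs from the default: written and tracked
    /// let root = builder.end_table(start);
    /// ```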
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x == default {
            return;
        }
        self.push_slot_always(slotoff, x);
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested("start_table can not be called when a table or vector is under construction");
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(&mut self, off: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed-optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
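    ///
    /// # Example
    ///
    /// A hand-built vector sketch (illustrative only), equivalent to
    /// `create_vector(&[1u32, 2, 3])`. Elements are pushed in reverse because
    /// the buffer is written back-to-front:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// builder.start_vector::<u32>(3);
    /// builder.push(3u32);
    /// builder.push(2u32);
    /// builder.push(1u32);
    /// let offset = builder.end_vector::<u32>(3);
    /// ```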
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested("start_vector can not be called when a table or vector is under construction");
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested("create_string can not be called when a table or vector is under construction");
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested("create_byte_string can not be called when a table or vector is under construction");
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
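    ///
    /// # Example
    ///
    /// An illustrative sketch: `u8` implements `SafeSliceAccess`, so the slice
    /// bytes can be copied directly into the buffer.
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let data: &[u8] = &[1, 2, 3, 4];
    /// let offset = builder.create_vector_direct(data);
    /// ```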
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested("create_vector_direct can not be called when a table or vector is under construction");
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));

        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of strings.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
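    ///
    /// # Example
    ///
    /// An illustrative sketch:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let names = ["alice", "bob"];
    /// let offset = builder.create_vector_of_strings(&names);
    /// ```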
    #[inline]
    pub fn create_vector_of_strings<'a, 'b>(&'a mut self, xs: &'b [&'b str]) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
        self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
        // internally, smallvec can be a stack-allocated or heap-allocated vector:
        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> = smallvec::SmallVec::with_capacity(xs.len());
        unsafe { offsets.set_len(xs.len()); }

        // note that this happens in reverse, because the buffer is built back-to-front:
        for (i, &s) in xs.iter().enumerate().rev() {
            let o = self.create_string(s);
            offsets[i] = o;
        }
        self.create_vector(&offsets[..])
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
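    ///
    /// # Example
    ///
    /// An illustrative sketch; scalars are converted to little-endian as they
    /// are pushed:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let offset = builder.create_vector(&[1u16, 2, 3]);
    /// ```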
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for i in (0..items.len()).rev() {
            self.push(items[i]);
        }
        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.owned_buf[self.head..]
    }
    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }
    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(&self,
                    tab_revloc: WIPOffset<TableFinishedWIPOffset>,
                    slot_byte_loc: VOffsetT,
                    assert_msg_name: &'static str) {
        let idx = self.used_space() - tab_revloc.value() as usize;
        let tab = Table::new(&self.owned_buf[self.head..], idx);
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier onto it, pushing a size prefix onto it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
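    ///
    /// # Example
    ///
    /// An illustrative sketch: after finishing, the first four bytes of
    /// `finished_data()` are a little-endian `u32` holding the size of the
    /// rest of the buffer.
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let start = builder.start_table();
    /// let root = builder.end_table(start);
    /// builder.finish_size_prefixed(root, None);
    /// let bytes = builder.finished_data();
    /// // bytes[0..4] holds the size of the remainder as a little-endian u32.
    /// ```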
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier onto it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.owned_buf.len() - self.head as usize
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc {
            id: slot_off,
            off: off,
        };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(&mut self, table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0F0F0 as UOffsetT).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0   [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here: aka "table_start"
        // --------------------------------------------------------------------

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                debug_assert_eq!(vtfw.get_field_offset(fl.id),
                                 0,
                                 "tried to write a vtable field multiple times");
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let dup_vt_use = {
            let this_vt = VTable::init(&self.owned_buf[..], self.head);
            self.find_duplicate_stored_vtable_revloc(this_vt)
        };

        let vt_use = match dup_vt_use {
            Some(n) => {
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                n
            }
            None => {
                let new_vt_use = self.used_space() as UOffsetT;
                self.written_vtable_revpos.push(new_vt_use);
                new_vt_use
            }
        };

        {
            let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
            let saw = read_scalar::<UOffsetT>(&self.owned_buf[n..n + SIZE_SOFFSET]);
            debug_assert_eq!(saw, 0xF0F0F0F0);
            emplace_scalar::<SOffsetT>(&mut self.owned_buf[n..n + SIZE_SOFFSET],
                                       vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT);
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    #[inline]
    fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
        for &revloc in self.written_vtable_revpos.iter().rev() {
            let o = VTable::init(&self.owned_buf[..], self.head + self.used_space() - revloc as usize);
            if needle == o {
                return Some(revloc);
            }
        }
        None
    }

    // Only call this when you know it is safe to double the size of the buffer.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);

        let starting_active_size = self.used_space();

        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        self.head += diff;

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);

        if new_len == 1 {
            return;
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            unsafe { write_bytes(ptr, 0, middle); }
        }
    }

    // Whether or not a size prefix is written changes how the data is loaded,
    // so the finish* functions are split along those lines.
    fn finish_with_opts<T>(&mut self,
                           root: WIPOffset<T>,
                           file_identifier: Option<&str>,
                           size_prefixed: bool) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested("buffer cannot be finished when a table or vector is under construction");
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space() as usize;
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.owned_buf[n..n + x.len()].copy_from_slice(x);

        n as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> usize {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(want <= FLATBUFFERS_MAX_BUFFER_SIZE,
                "cannot grow buffer beyond 2 gigabytes");

        while self.unused_ready_space() < want {
            self.grow_owned_buf();
        }
        want
    }
    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }
    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(self.nested, "incorrect FlatBufferBuilder usage: {} must be called while in a nested state", fn_name);
    }
    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, msg);
    }
    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, msg);
    }
    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, msg);
    }

}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
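///
/// For example (assuming the usual 2-byte `VOffsetT`): with no fields this is
/// 4 bytes (just the two header entries), and with a maximum field offset of 8
/// it is 8 + 2 = 10 bytes.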
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => { field_index_to_field_offset(0) as usize }
        Some(mv) => { mv as usize + SIZE_VOFFSET }
    }
}

#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
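    // This computes (-buf_size) mod scalar_size; scalar_size is assumed to be
    // a power of two, so subtracting 1 below yields a valid bitmask.
    // e.g. padding_bytes(6, 4) == 2: two more bytes bring the total to 8.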
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}