/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern crate smallvec;

use std::cmp::max;
use std::iter::{DoubleEndedIterator, ExactSizeIterator};
use std::marker::PhantomData;
use std::ptr::write_bytes;
use std::slice::from_raw_parts;

use crate::endian_scalar::{emplace_scalar, read_scalar_at};
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::table::Table;
use crate::vector::{SafeSliceAccess, Vector};
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

/// FlatBufferBuilder builds a FlatBuffer through manipulating its internal
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
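///
/// A minimal end-to-end sketch (assuming this file's public API as exported
/// by the `flatbuffers` crate; not compiled here):
///
/// ```ignore
/// let mut builder = flatbuffers::FlatBufferBuilder::new();
/// // Data is written back-to-front; offsets refer into the owned buffer.
/// let name = builder.create_string("hello");
/// builder.finish_minimal(name);
/// let bytes: &[u8] = builder.finished_data();
/// assert!(!bytes.is_empty());
/// ```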
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb> {
    owned_buf: Vec<u8>,
    head: usize,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,
    strings_pool: Vec<WIPOffset<&'fbb str>>,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::with_capacity(0)
    }
    #[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
    pub fn new_with_capacity(size: usize) -> Self {
        Self::with_capacity(size)
    }
    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
    pub fn with_capacity(size: usize) -> Self {
        Self::from_vec(vec![0; size])
    }
    /// Create a FlatBufferBuilder that is ready for writing, reusing
    /// an existing vector.
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_owned_buf:
        assert!(
            buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );
        let head = buffer.len();
        FlatBufferBuilder {
            owned_buf: buffer,
            head,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,
            strings_pool: Vec::new(),

            _phantom: PhantomData,
        }
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
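    ///
    /// A reuse-loop sketch (hedged; `send` is a hypothetical consumer standing
    /// in for whatever uses the finished bytes; not compiled here):
    ///
    /// ```ignore
    /// let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(1024);
    /// for msg in ["a", "b", "c"] {
    ///     let root = builder.create_string(msg);
    ///     builder.finish_minimal(root);
    ///     send(builder.finished_data()); // hypothetical consumer
    ///     builder.reset();               // keep the heap allocation
    /// }
    /// ```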
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, to_clear);
            }
        }

        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
        self.strings_pool.clear();
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
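    ///
    /// A small sketch (not compiled here): pushing a scalar returns a
    /// `WIPOffset` measured from the tail of the buffer.
    ///
    /// ```ignore
    /// let mut b = flatbuffers::FlatBufferBuilder::new();
    /// let _off = b.push(42u32);
    /// assert_eq!(b.unfinished_data().len(), 4); // one little-endian u32
    /// ```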
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op.
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
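    ///
    /// A minimal manual-table sketch (generated code normally does this; the
    /// slot offsets follow the FlatBuffers `(index + 2) * 2` rule, so field #0
    /// is at vtable offset 4 and field #1 at 6; not compiled here):
    ///
    /// ```ignore
    /// let start = builder.start_table();
    /// builder.push_slot::<u32>(4, 0, 0); // equal to default: not stored
    /// builder.push_slot::<u32>(6, 7, 0); // differs: stored and tracked
    /// let table = builder.end_table(start);
    /// builder.finish_minimal(table);
    /// ```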
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed-optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
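    ///
    /// A manual-vector sketch (elements are pushed in reverse because the
    /// buffer is written back-to-front; not compiled here):
    ///
    /// ```ignore
    /// builder.start_vector::<u32>(3);
    /// for x in [1u32, 2, 3].iter().rev() {
    ///     builder.push(*x);
    /// }
    /// let vec_offset = builder.end_vector::<u32>(3);
    /// ```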
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

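    /// Create a utf8 string that is deduplicated against previously created
    /// shared strings: if an identical string has already been written via
    /// this method, the existing offset is returned instead of writing the
    /// bytes again.
    ///
    /// A sketch (not compiled here):
    ///
    /// ```ignore
    /// let a = builder.create_shared_string("twice");
    /// let b = builder.create_shared_string("twice");
    /// assert_eq!(a.value(), b.value()); // same offset, written once
    /// ```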
    #[inline]
    pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_shared_string can not be called when a table or vector is under construction",
        );

        // Save a reference to owned_buf since Rust doesn't like us referencing
        // it inside the binary_search_by closure.
        let buf = &self.owned_buf;

        let found = self.strings_pool.binary_search_by(|offset| {
            let ptr = offset.value() as usize;
            // Locate the length prefix of the written string.
            let str_memory = &buf[buf.len() - ptr..];
            // Read the size of the written string from the buffer.
            let size =
                u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
                    as usize;
            // Size of the length prefix itself.
            let string_size: usize = 4;
            // Fetch the actual string bytes, which start immediately after
            // the length prefix and run for `size` bytes.
            let iter = str_memory[string_size..size + string_size].iter();
            // Compare the bytes of the stored string with the string being
            // written.
            iter.cloned().cmp(s.bytes())
        });

        match found {
            Ok(index) => self.strings_pool[index],
            Err(index) => {
                let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
                self.strings_pool.insert(index, address);
                address
            }
        }
    }

    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
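    ///
    /// A sketch with a byte slice, which is always safe (not compiled here):
    ///
    /// ```ignore
    /// let payload: &[u8] = &[0xde, 0xad, 0xbe, 0xef];
    /// let v = builder.create_vector_direct(payload);
    /// ```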
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested(
            "create_vector_direct can not be called when a table or vector is under construction",
        );
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));

        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of strings.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
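    ///
    /// A sketch (not compiled here):
    ///
    /// ```ignore
    /// let names = builder.create_vector_of_strings(&["alice", "bob"]);
    /// ```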
    #[inline]
    pub fn create_vector_of_strings<'a, 'b>(
        &'a mut self,
        xs: &'b [&'b str],
    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
        self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
        // internally, smallvec can be a stack-allocated or heap-allocated vector:
        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
            smallvec::SmallVec::with_capacity(xs.len());
        unsafe {
            offsets.set_len(xs.len());
        }

        // note that this happens in reverse, because the buffer is built back-to-front:
        for (i, &s) in xs.iter().enumerate().rev() {
            let o = self.create_string(s);
            offsets[i] = o;
        }
        self.create_vector(&offsets[..])
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
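    ///
    /// A sketch with scalars (offsets returned by `create_string` and friends
    /// work the same way; not compiled here):
    ///
    /// ```ignore
    /// let v = builder.create_vector(&[1u64, 2, 3]);
    /// ```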
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for i in (0..items.len()).rev() {
            self.push(items[i]);
        }
        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
    #[inline]
    pub fn create_vector_from_iter<T: Push + Copy>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let len = items.len();
        self.align(len * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for item in items.rev() {
            self.push(item);
        }
        WIPOffset::new(self.push::<UOffsetT>(len as UOffsetT).value())
    }

    /// Set whether default values are stored.
    ///
    /// In order to save space, fields that are set to their default value
    /// aren't stored in the buffer. Setting `force_defaults` to `true`
    /// disables this optimization.
    ///
    /// By default, `force_defaults` is `false`.
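    ///
    /// A sketch (not compiled here):
    ///
    /// ```ignore
    /// builder.force_defaults(true);
    /// let start = builder.start_table();
    /// builder.push_slot::<u32>(4, 0, 0); // stored even though it is the default
    /// let table = builder.end_table(start);
    /// ```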
    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.owned_buf[self.head..]
    }
    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    /// # Panics
    /// Panics if the buffer is not finished.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }
    /// Returns a mutable view of a finished buffer and location of where the flatbuffer starts.
    /// Note that modifying the flatbuffer data may corrupt it.
    /// # Panics
    /// Panics if the flatbuffer is not finished.
    #[inline]
    pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
        (&mut self.owned_buf, self.head)
    }
    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;
        let tab = Table::new(&self.owned_buf[self.head..], idx);
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
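    ///
    /// A sketch (file identifiers are exactly four bytes; "MONS" is a made-up
    /// identifier here; not compiled here):
    ///
    /// ```ignore
    /// let root = builder.create_string("root");
    /// builder.finish(root, Some("MONS"));
    /// let bytes = builder.finished_data();
    /// ```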
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.owned_buf.len() - self.head
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0   [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
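        //
        // A concrete example (illustrative, not part of the original
        // comments): a table with a single u32 field stored 4 bytes past
        // table_start gets this vtable, shown as little-endian u16s:
        //   06 00 -- vtable len in bytes (3 entries * 2 bytes)
        //   08 00 -- object inline len (4-byte vtable offset + 4-byte field)
        //   04 00 -- field #0 is 4 bytes past table_start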

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                debug_assert_eq!(
                    vtfw.get_field_offset(fl.id),
                    0,
                    "tried to write a vtable field multiple times"
                );
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let dup_vt_use = {
            let this_vt = VTable::init(&self.owned_buf[..], self.head);
            self.find_duplicate_stored_vtable_revloc(this_vt)
        };

        let vt_use = match dup_vt_use {
            Some(n) => {
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                n
            }
            None => {
                let new_vt_use = self.used_space() as UOffsetT;
                self.written_vtable_revpos.push(new_vt_use);
                new_vt_use
            }
        };

        {
            let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
            let saw = unsafe { read_scalar_at::<UOffsetT>(&self.owned_buf, n) };
            debug_assert_eq!(saw, 0xF0F0_F0F0);
            unsafe {
                emplace_scalar::<SOffsetT>(
                    &mut self.owned_buf[n..n + SIZE_SOFFSET],
                    vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
                );
            }
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    #[inline]
    fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
        for &revloc in self.written_vtable_revpos.iter().rev() {
            let o = VTable::init(
                &self.owned_buf[..],
                self.head + self.used_space() - revloc as usize,
            );
            if needle == o {
                return Some(revloc);
            }
        }
        None
    }

    // Only call this when you know it is safe to double the size of the buffer.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);

        let starting_active_size = self.used_space();

        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        self.head += diff;

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);

        if new_len == 1 {
            return;
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }

    // Whether a size prefix is written changes how the resulting data is
    // loaded, so the finish* functions are split along those lines.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space();
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.owned_buf[n..n + x.len()].copy_from_slice(x);

        n as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> usize {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_owned_buf();
        }
        want
    }
    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }
    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }
    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, "{}", msg);
    }
    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, "{}", msg);
    }
    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, "{}", msg);
    }
}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
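///
/// For example (derived from the `(index + 2) * 2` slot-offset rule): with no
/// fields this returns 4 bytes (just the two u16 header entries); if the
/// largest tracked field id is 8 (field index 2), it returns 8 + 2 = 10 bytes.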
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

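/// Number of padding bytes needed to bring `buf_size` up to a multiple of
/// `scalar_size` (which must be a power of two). For example,
/// `padding_bytes(5, 4) == 3` (since 5 + 3 = 8) and `padding_bytes(8, 4) == 0`.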
#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::with_capacity(0)
    }
}