/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern crate smallvec;

use core::cmp::max;
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
use core::ptr::write_bytes;
use core::slice::from_raw_parts;
#[cfg(feature = "no_std")]
use alloc::{vec, vec::Vec};

use crate::endian_scalar::{emplace_scalar, read_scalar_at};
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::table::Table;
use crate::vector::{SafeSliceAccess, Vector};
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

/// FlatBufferBuilder builds a FlatBuffer through manipulating its internal
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
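///
/// # Example
///
/// A minimal end-to-end sketch, assuming this module is used through the
/// `flatbuffers` crate; generated code normally drives the table-building
/// calls:
///
/// ```ignore
/// let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(1024);
/// let name = builder.create_string("example");
/// builder.finish_minimal(name);
/// let bytes: &[u8] = builder.finished_data();
/// ```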
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb> {
    owned_buf: Vec<u8>,
    head: usize,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,
    strings_pool: Vec<WIPOffset<&'fbb str>>,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::with_capacity(0)
    }
    #[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
    pub fn new_with_capacity(size: usize) -> Self {
        Self::with_capacity(size)
    }
    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
    pub fn with_capacity(size: usize) -> Self {
        Self::from_vec(vec![0; size])
    }
    /// Create a FlatBufferBuilder that is ready for writing, reusing
    /// an existing vector.
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_owned_buf:
        assert!(
            buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );
        let head = buffer.len();
        FlatBufferBuilder {
            owned_buf: buffer,
            head,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,
            strings_pool: Vec::new(),

            _phantom: PhantomData,
        }
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
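    ///
    /// For example, amortizing the allocation across many messages (a sketch;
    /// `messages`, `build_message`, and `send` are hypothetical):
    ///
    /// ```ignore
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// for msg in messages {
    ///     let root = build_message(&mut builder, msg);
    ///     builder.finish_minimal(root);
    ///     send(builder.finished_data());
    ///     builder.reset(); // keeps the heap allocation for the next message
    /// }
    /// ```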
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, to_clear);
            }
        }

        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
        self.strings_pool.clear();
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
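    ///
    /// For instance, pushing a scalar directly (a sketch; most users go
    /// through the generated table APIs instead):
    ///
    /// ```ignore
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let loc = builder.push(42u32); // offset measured from the buffer tail
    /// ```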
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op.
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed-optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
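    ///
    /// For example, writing a vector of `u32` by hand (a sketch equivalent to
    /// `create_vector`; elements are pushed in reverse because the buffer is
    /// built back-to-front):
    ///
    /// ```ignore
    /// builder.start_vector::<u32>(3);
    /// for x in [3u32, 2, 1] { // reversed: the readable order is [1, 2, 3]
    ///     builder.push(x);
    /// }
    /// let vec_offset = builder.end_vector::<u32>(3);
    /// ```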
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

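    /// Create a utf8 string, de-duplicating it against the strings already
    /// interned by this method. If an identical string was written before,
    /// the existing offset is returned instead of writing the bytes again.
    ///
    /// A sketch:
    ///
    /// ```ignore
    /// let a = builder.create_shared_string("hi");
    /// let b = builder.create_shared_string("hi");
    /// assert_eq!(a.value(), b.value()); // the bytes were written once
    /// ```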
    #[inline]
    pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_shared_string can not be called when a table or vector is under construction",
        );

        // Save a reference to owned_buf, since Rust doesn't let us borrow
        // self again inside the binary_search_by closure.
        let buf = &self.owned_buf;

        let found = self.strings_pool.binary_search_by(|offset| {
            let ptr = offset.value() as usize;
            // View of the written string, starting at its length prefix.
            let str_memory = &buf[buf.len() - ptr..];
            // Read the string's byte length out of the buffer.
            let size =
                u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
                    as usize;
            // The length prefix itself occupies 4 bytes.
            let string_size: usize = 4;
            // The string's bytes start right after the length prefix and run
            // for `size` bytes.
            let iter = str_memory[string_size..size + string_size].iter();
            // Compare the stored bytes with the string being written.
            iter.cloned().cmp(s.bytes())
        });

        match found {
            Ok(index) => self.strings_pool[index],
            Err(index) => {
                let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
                self.strings_pool.insert(index, address);
                address
            }
        }
    }

    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested(
            "create_vector_direct can not be called when a table or vector is under construction",
        );
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));

        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of strings.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
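    ///
    /// A sketch:
    ///
    /// ```ignore
    /// let names = builder.create_vector_of_strings(&["ann", "bob"]);
    /// ```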
    #[inline]
    pub fn create_vector_of_strings<'a, 'b>(
        &'a mut self,
        xs: &'b [&'b str],
    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
        self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
        // internally, smallvec can be a stack-allocated or heap-allocated vector:
        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
            smallvec::SmallVec::with_capacity(xs.len());
        unsafe {
            offsets.set_len(xs.len());
        }

        // note that this happens in reverse, because the buffer is built back-to-front:
        for (i, &s) in xs.iter().enumerate().rev() {
            let o = self.create_string(s);
            offsets[i] = o;
        }
        self.create_vector(&offsets[..])
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
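    ///
    /// For example (a sketch):
    ///
    /// ```ignore
    /// let nums = builder.create_vector(&[1u32, 2, 3]);
    /// ```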
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for i in (0..items.len()).rev() {
            self.push(items[i]);
        }
        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
    #[inline]
    pub fn create_vector_from_iter<T: Push + Copy>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let len = items.len();
        self.align(len * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for item in items.rev() {
            self.push(item);
        }
        WIPOffset::new(self.push::<UOffsetT>(len as UOffsetT).value())
    }

    /// Set whether default values are stored.
    ///
    /// In order to save space, fields that are set to their default value
    /// aren't stored in the buffer. Setting `force_defaults` to `true`
    /// disables this optimization.
    ///
    /// By default, `force_defaults` is `false`.
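    ///
    /// For example (a sketch; slot offset 4 is the first field slot):
    ///
    /// ```ignore
    /// builder.force_defaults(true);
    /// let start = builder.start_table();
    /// builder.push_slot(4, 0u8, 0u8); // written even though it equals the default
    /// let table = builder.end_table(start);
    /// ```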
    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.owned_buf[self.head..]
    }
    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    /// # Panics
    /// Panics if the buffer is not finished.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }
    /// Returns a mutable view of the finished buffer and the location at which
    /// the flatbuffer data starts.
    /// Note that modifying the flatbuffer data may corrupt it.
    /// # Panics
    /// Panics if the flatbuffer is not finished.
    #[inline]
    pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
        (&mut self.owned_buf, self.head)
    }
    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;
        let tab = Table::new(&self.owned_buf[self.head..], idx);
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
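    ///
    /// A sketch: the finished buffer begins with a little-endian `u32` size
    /// prefix that readers must skip (or strip) before the root offset.
    ///
    /// ```ignore
    /// builder.finish_size_prefixed(root, None);
    /// let data = builder.finished_data(); // first 4 bytes are the size prefix
    /// ```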
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.owned_buf.len() - self.head as usize
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0   [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let new_vt_bytes = &self.owned_buf[vt_start_pos..vt_end_pos];
        let found = self.written_vtable_revpos.binary_search_by(|old_vtable_revpos: &UOffsetT| {
            let old_vtable_pos = self.owned_buf.len() - *old_vtable_revpos as usize;
            let old_vtable = VTable::init(&self.owned_buf, old_vtable_pos);
            new_vt_bytes.cmp(old_vtable.as_bytes())
        });
        let final_vtable_revpos = match found {
            Ok(i) => {
                // The new vtable is a duplicate, so clear it.
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                self.written_vtable_revpos[i]
            }
            Err(i) => {
                // This is a new vtable. Add it to the cache.
                let new_vt_revpos = self.used_space() as UOffsetT;
                self.written_vtable_revpos.insert(i, new_vt_revpos);
                new_vt_revpos
            }
        };
        // Write the signed offset from the table to its vtable.
        let table_pos = self.owned_buf.len() - object_revloc_to_vtable.value() as usize;
        let tmp_soffset_to_vt = unsafe { read_scalar_at::<UOffsetT>(&self.owned_buf, table_pos) };
        debug_assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
        unsafe {
            emplace_scalar::<SOffsetT>(
                &mut self.owned_buf[table_pos..table_pos + SIZE_SOFFSET],
                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    // Only call this when you know it is safe to double the size of the buffer.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);

        let starting_active_size = self.used_space();

        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        self.head += diff;

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);

        if new_len == 1 {
            return;
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }

    // Whether or not a size prefix is present changes how we load the data,
    // so the finish* functions are split along those lines.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space() as usize;
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.owned_buf[n..n + x.len()].copy_from_slice(x);

        n as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> usize {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_owned_buf();
        }
        want
    }
    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }
    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }
    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, "{}", msg);
    }
    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, "{}", msg);
    }
    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, "{}", msg);
    }
}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
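/// For example, if the largest field id is 8 (the byte offset of the third
/// field slot), the vtable needs 8 + SIZE_VOFFSET = 10 bytes: a two-`u16`
/// header plus three `u16` field slots.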
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
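    // e.g. buf_size = 13, scalar_size = 4: (!13 + 1) & 3 == 3, and 13 + 3 is
    // a multiple of 4. This trick assumes scalar_size is a power of two.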
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::with_capacity(0)
    }
}