/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

#include <algorithm>
#include <map>
// Used to select STL variant.
#include "flatbuffers/base.h"
// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/util.h"

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or the size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  FBT_NULL = 0,
  FBT_INT = 1,
  FBT_UINT = 2,
  FBT_FLOAT = 3,
  // Types above are stored inline; types below (except FBT_BOOL) store an
  // offset.
  FBT_KEY = 4,
  FBT_STRING = 5,
  FBT_INDIRECT_INT = 6,
  FBT_INDIRECT_UINT = 7,
  FBT_INDIRECT_FLOAT = 8,
  FBT_MAP = 9,
  FBT_VECTOR = 10,      // Untyped.
  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
  FBT_VECTOR_UINT = 12,
  FBT_VECTOR_FLOAT = 13,
  FBT_VECTOR_KEY = 14,
  // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
  // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
  FBT_VECTOR_STRING_DEPRECATED = 15,
  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
  FBT_VECTOR_UINT2 = 17,
  FBT_VECTOR_FLOAT2 = 18,
  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
  FBT_VECTOR_UINT3 = 20,
  FBT_VECTOR_FLOAT3 = 21,
  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
  FBT_VECTOR_UINT4 = 23,
  FBT_VECTOR_FLOAT4 = 24,
  FBT_BLOB = 25,
  FBT_BOOL = 26,
  FBT_VECTOR_BOOL =
      36,  // To allow the same conversion from element type to vector type.

  FBT_MAX_TYPE = 37
};
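
// A packed type byte thus combines a Type (upper 6 bits) with a BitWidth
// (lower 2 bits); see PackedType() further below. A worked example
// (illustrative only, derived from the enums above):
//   PackedType(BIT_WIDTH_32, FBT_INT) == (FBT_INT << 2) | BIT_WIDTH_32 == 6
//   Type t = static_cast<Type>(6 >> 2);          // == FBT_INT
//   BitWidth bw = static_cast<BitWidth>(6 & 3);  // == BIT_WIDTH_32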

inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }

inline bool IsTypedVectorElementType(Type t) {
  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
}

inline bool IsTypedVector(Type t) {
  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) ||
         t == FBT_VECTOR_BOOL;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
  }
}
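
// For example, ToTypedVector(FBT_INT) == FBT_VECTOR_INT and
// ToTypedVector(FBT_FLOAT, 3) == FBT_VECTOR_FLOAT3; this relies on the
// scalar and vector enum values above being declared in the same order.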

inline Type ToTypedVectorElementType(Type t) {
  FLATBUFFERS_ASSERT(IsTypedVector(t));
  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
  auto fixed_type = t - FBT_VECTOR_INT2;
  // There are 3 element types for each fixed length, starting from length 2.
  *len = static_cast<uint8_t>(fixed_type / 3 + 2);
  return static_cast<Type>(fixed_type % 3 + FBT_INT);
}
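
// Worked example: for t == FBT_VECTOR_FLOAT3 (21), fixed_type == 5, so
// *len == 5 / 3 + 2 == 3, and the element type is 5 % 3 + FBT_INT ==
// FBT_FLOAT.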

// TODO: implement proper support for 8/16-bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template<typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
             ? (byte_width < 2
                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
             : (byte_width < 8
                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}
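
// For example, ReadInt64() below instantiates this as
// ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>, so a
// byte_width of 2 reads the value as an int16_t and sign-extends it to the
// int64_t return type.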

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
      data, byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count
  // is a constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.

  // clang-format off
  #if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
    // This is 64-bit Windows only, __movsb does not work on 32-bit Windows.
    uint64_t u = 0;
    __movsb(reinterpret_cast<uint8_t *>(&u),
            reinterpret_cast<const uint8_t *>(data), byte_width);
    return flatbuffers::EndianScalar(u);
  #else
    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
        data, byte_width);
  #endif
  // clang-format on
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
                                                               byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

inline BitWidth WidthU(uint64_t u) {
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)                   \
  {                                                                     \
    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

inline BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}
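
// Worked examples: WidthU(255) == BIT_WIDTH_8, WidthU(256) == BIT_WIDTH_16.
// WidthI() shifts left by one and complements negative values so the sign
// bit is accounted for: WidthI(127) checks WidthU(254) == BIT_WIDTH_8, but
// WidthI(128) checks WidthU(256) == BIT_WIDTH_16, since an int8_t cannot
// represent 128. Likewise WidthI(-1) checks WidthU(1) == BIT_WIDTH_8.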

inline BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                         : BIT_WIDTH_64;
}

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
      : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Object that has a size, obtained either from a size prefix or supplied
// elsewhere.
class Sized : public Object {
 public:
  // Size prefix.
  Sized(const uint8_t *data, uint8_t byte_width)
      : Object(data, byte_width), size_(read_size()) {}
  // Manual size.
  Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Object(data, byte_width), size_(sz) {}
  size_t size() const { return size_; }
  // Access the size stored in `byte_width_` bytes before the data_ pointer.
  size_t read_size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }

 protected:
  size_t size_;
};

class String : public Sized {
 public:
  // Size prefix.
  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
  // Manual size.
  String(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Sized(data, byte_width, sz) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
  std::string str() const { return std::string(c_str(), size()); }

  static String EmptyString() {
    static const char *empty_string = "";
    return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};

class Blob : public Sized {
 public:
  Blob(const uint8_t *data_buf, uint8_t byte_width)
      : Sized(data_buf, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0 /*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
  const uint8_t *data() const { return data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0 /*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
      : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

  friend Reference;

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
      : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() const { return type_; }
  uint8_t size() const { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       FBT_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
};
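
// A sketch of the map layout, as implied by the accessors above: a map is a
// vector of values whose size prefix is itself prefixed by an offset to a
// separate (shareable) typed vector of keys, plus that key vector's byte
// width:
//
//   [keys offset] [keys byte width] [size] [value 0] [value 1] ...
//                                          ^-- data_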

inline void IndentString(std::string &s, int indent,
                         const char *indent_string) {
  for (int i = 0; i < indent; i++) s += indent_string;
}

template<typename T>
void AppendToString(std::string &s, T &&v, bool keys_quoted, bool indented,
                    int cur_indent, const char *indent_string,
                    bool natural_utf8) {
  s += "[";
  s += indented ? "\n" : " ";
  for (size_t i = 0; i < v.size(); i++) {
    if (i) {
      s += ",";
      s += indented ? "\n" : " ";
    }
    if (indented) IndentString(s, cur_indent, indent_string);
    v[i].ToString(true, keys_quoted, s, indented, cur_indent, indent_string,
                  natural_utf8);
  }
  if (indented) {
    s += "\n";
    IndentString(s, cur_indent - 1, indent_string);
  } else {
    s += " ";
  }
  s += "]";
}

template<typename T>
void AppendToString(std::string &s, T &&v, bool keys_quoted) {
  AppendToString(s, v, keys_quoted, false, 0, "", false);
}

class Reference {
 public:
  Reference()
      : data_(nullptr), parent_width_(0), byte_width_(0), type_(FBT_NULL) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
      : data_(data),
        parent_width_(parent_width),
        byte_width_(byte_width),
        type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
      : data_(data),
        parent_width_(parent_width),
        byte_width_(static_cast<uint8_t>(1 << (packed_type & 3))),
        type_(static_cast<Type>(packed_type >> 2)) {}

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == FBT_NULL; }
  bool IsBool() const { return type_ == FBT_BOOL; }
  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
  bool IsUInt() const {
    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
  }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const {
    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
  }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == FBT_STRING; }
  bool IsKey() const { return type_ == FBT_KEY; }
  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
  bool IsFixedTypedVector() const {
    return flexbuffers::IsFixedTypedVector(type_);
  }
  bool IsAnyVector() const {
    return (IsTypedVector() || IsFixedTypedVector() || IsVector());
  }
  bool IsMap() const { return type_ == FBT_MAP; }
  bool IsBlob() const { return type_ == FBT_BLOB; }
  bool AsBool() const {
    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
                              : AsUInt64()) != 0;
  }

  // Reads any type as an int64_t. Never fails, does the most sensible
  // conversion: floats are truncated, strings are parsed for a number,
  // vectors/maps return their size. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == FBT_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_UINT: return ReadUInt64(data_, parent_width_);
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
        case FBT_BOOL: return ReadInt64(data_, parent_width_);
        default:
          // Convert other things to int.
          return 0;
      }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == FBT_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_INT: return ReadInt64(data_, parent_width_);
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
        default:
          // Convert other things to uint.
          return 0;
      }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }

  double AsDouble() const {
    if (type_ == FBT_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
        case FBT_INT:
          return static_cast<double>(ReadInt64(data_, parent_width_));
        case FBT_UINT:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        case FBT_INDIRECT_INT:
          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
        case FBT_INDIRECT_UINT:
          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
        case FBT_NULL: return 0.0;
        case FBT_STRING: {
          double d;
          flatbuffers::StringToNumber(AsString().c_str(), &d);
          return d;
        }
        case FBT_VECTOR: return static_cast<double>(AsVector().size());
        case FBT_BOOL:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        default:
          // Convert strings and other things to float.
          return 0;
      }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == FBT_KEY || type_ == FBT_STRING) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read something that
  // is not a string or key.
  String AsString() const {
    if (type_ == FBT_STRING) {
      return String(Indirect(), byte_width_);
    } else if (type_ == FBT_KEY) {
      auto key = Indirect();
      return String(key, byte_width_,
                    strlen(reinterpret_cast<const char *>(key)));
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() const {
    std::string s;
    ToString(false, false, s);
    return s;
  }

  // Convert any type to a JSON-like string. strings_quoted determines if
  // string values at the top level receive "" quotes (inside other values
  // they always do). keys_quoted determines if keys are quoted, at any level.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
    ToString(strings_quoted, keys_quoted, s, false, 0, "", false);
  }

  // This version additionally allows you to specify if you want indentation.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s,
                bool indented, int cur_indent, const char *indent_string,
                bool natural_utf8 = false) const {
    if (type_ == FBT_STRING) {
      String str(Indirect(), byte_width_);
      if (strings_quoted) {
        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true,
                                  natural_utf8);
      } else {
        s.append(str.c_str(), str.length());
      }
    } else if (IsKey()) {
      auto str = AsKey();
      if (keys_quoted) {
        flatbuffers::EscapeString(str, strlen(str), &s, true, natural_utf8);
      } else {
        s += str;
      }
    } else if (IsInt()) {
      s += flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      s += flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      s += flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      s += "null";
    } else if (IsBool()) {
      s += AsBool() ? "true" : "false";
    } else if (IsMap()) {
      s += "{";
      s += indented ? "\n" : " ";
      auto m = AsMap();
      auto keys = m.Keys();
      auto vals = m.Values();
      for (size_t i = 0; i < keys.size(); i++) {
        bool kq = keys_quoted;
        if (!kq) {
          // FlexBuffers keys may contain arbitrary characters, so only allow
          // unquoted output if the key looks like an "identifier":
          const char *p = keys[i].AsKey();
          if (!flatbuffers::is_alpha(*p) && *p != '_') {
            kq = true;
          } else {
            while (*++p) {
              if (!flatbuffers::is_alnum(*p) && *p != '_') {
                kq = true;
                break;
              }
            }
          }
        }
        if (indented) IndentString(s, cur_indent + 1, indent_string);
        keys[i].ToString(true, kq, s);
        s += ": ";
        vals[i].ToString(true, keys_quoted, s, indented, cur_indent + 1,
                         indent_string, natural_utf8);
        if (i < keys.size() - 1) {
          s += ",";
          if (!indented) s += " ";
        }
        if (indented) s += "\n";
      }
      if (!indented) s += " ";
      if (indented) IndentString(s, cur_indent, indent_string);
      s += "}";
    } else if (IsVector()) {
      AppendToString<Vector>(s, AsVector(), keys_quoted, indented,
                             cur_indent + 1, indent_string, natural_utf8);
    } else if (IsTypedVector()) {
      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted, indented,
                                  cur_indent + 1, indent_string,
                                  natural_utf8);
    } else if (IsFixedTypedVector()) {
      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted,
                                       indented, cur_indent + 1, indent_string,
                                       natural_utf8);
    } else if (IsBlob()) {
      auto blob = AsBlob();
      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
                                blob.size(), &s, true, false);
    } else {
      s += "(?)";
    }
  }

  // This function returns the empty blob if you try to read a not-blob.
  // Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read a not-vector.
  // Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector()) {
      auto tv =
          TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
      if (tv.type_ == FBT_STRING) {
        // These can't be accessed as strings, since we don't know the
        // bit-width of the size field, see the declaration of
        // FBT_VECTOR_STRING_DEPRECATED above for details.
        // We change the type here to be keys, which are a subtype of strings,
        // and will ignore the size field. This will truncate strings with
        // embedded nulls.
        tv.type_ = FBT_KEY;
      }
      return tv;
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector()) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == FBT_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  template<typename T> T As() const;

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
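  //
  // Example (a sketch; "health" is a hypothetical key in a buffer built
  // elsewhere with ForceMinimumBitWidth so the new value is sure to fit):
  //   auto root = flexbuffers::GetRoot(buffer).AsMap();
  //   if (!root["health"].MutateInt(42)) {
  //     // The value didn't fit in the space reserved for it.
  //   }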
  bool MutateInt(int64_t i) {
    if (type_ == FBT_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == FBT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateBool(bool b) {
    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == FBT_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == FBT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T>
  bool Mutate(const uint8_t *dest, T t, size_t byte_width,
              BitWidth value_width) {
    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
                byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T>
  bool MutateF(const uint8_t *dest, T t, size_t byte_width,
               BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    FLATBUFFERS_ASSERT(false);
    return false;
  }

  friend class Verifier;

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};

// Template specializations for As().
template<> inline bool Reference::As<bool>() const { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() const {
  return AsUInt16();
}
template<> inline uint32_t Reference::As<uint32_t>() const {
  return AsUInt32();
}
template<> inline uint64_t Reference::As<uint64_t>() const {
  return AsUInt64();
}

template<> inline double Reference::As<double>() const { return AsDouble(); }
template<> inline float Reference::As<float>() const { return AsFloat(); }

template<> inline String Reference::As<String>() const { return AsString(); }
template<> inline std::string Reference::As<std::string>() const {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() const {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() const { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}
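
// As the accessor above shows, an untyped vector stores one packed type byte
// per element after the elements themselves:
//
//   [size] [elem 0] ... [elem len-1] [type 0] ... [type len-1]
//          ^-- data_
//
// Typed vectors (below) omit the trailing type bytes, since every element
// shares the element type carried in the parent's type field.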

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
    default: FLATBUFFERS_ASSERT(false); return Reference();
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res) return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from
  // there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(buffer.data(), buffer.size());
}
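
// Example of reading a finished buffer (a sketch; "coords" is a hypothetical
// key):
//   auto root = flexbuffers::GetRoot(my_buffer).AsMap();
//   auto coords = root["coords"].AsTypedVector();
//   for (size_t i = 0; i < coords.size(); i++) {
//     double d = coords[i].AsDouble();
//   }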

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};
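
// For example, a builder that also pools repeated string values (a sketch):
//   flexbuffers::Builder fbb(256,
//                            flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);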

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size),
        finished_(false),
        has_duplicate_keys_(false),
        flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8),
        key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

#ifdef FLATBUFFERS_DEFAULT_DECLARATION
  Builder(Builder &&) = default;
  Builder &operator=(Builder &&) = default;
#endif

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is.
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) {
    Key(key);
    Null();
  }

  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) {
    Key(key);
    Bool(b);
  }

  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove the key we just serialized, and use
        // the existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove the string we just serialized, and
        // use the existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) { return String(str, strlen(str)); }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, FBT_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(v.data(), v.size(), 0, FBT_BLOB);
  }

  void Blob(const char *key, const void *data, size_t len) {
    Key(key);
    Blob(data, len);
  }
  void Blob(const char *key, const std::vector<uint8_t> &v) {
    Key(key);
    Blob(v);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) {
    Key(key);
    return stack_.size();
  }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    auto len = MapElementCount(start);
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would
    // be quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict = reinterpret_cast<TwoValue *>(stack_.data() + start);
    std::sort(
        dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool {
          auto as = reinterpret_cast<const char *>(buf_.data() + a.key.u_);
          auto bs = reinterpret_cast<const char *>(buf_.data() + b.key.u_);
          auto comp = strcmp(as, bs);
          // We want to disallow duplicate keys, since this results in a
          // map where values cannot be found.
          // But we can't assert here (since we don't want to fail on
          // random JSON input) or have an error mechanism.
          // Instead, we set has_duplicate_keys_ in the builder to
          // signal this.
          // TODO: We have to check for pointer equality, as some sort
          // implementations apparently call this function with the same
          // element?? Why?
          if (!comp && &a != &b) has_duplicate_keys_ = true;
          return comp < 0;
        });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }
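
  // Example of building a map with explicit Start/End calls (a sketch; the
  // Map() lambda overloads below wrap this same pattern). Keys may be added
  // in any order, since EndMap sorts them for binary search:
  //   auto start = fbb.StartMap();
  //   fbb.Int("b", 2);
  //   fbb.Int("a", 1);
  //   fbb.EndMap(start);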

  // Call this after EndMap to see if the map had any duplicate keys.
  // Any map with such keys won't be able to retrieve all values.
  bool HasDuplicateKeys() const { return has_duplicate_keys_; }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T>
  size_t Vector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T>
  void Vector(const char *key, const T *elems, size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(vec.data(), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T>
  size_t TypedVector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. For anything bigger, use a
    // regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template<typename T>
  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }

  size_t MapElementCount(size_t start) {
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    return len;
  }

  // If you wish to share a value explicitly (a value not shared automatically
  // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these
  // functions. Or if you wish to turn those flags off for performance reasons
  // and still do some explicit sharing. For example:
  //   builder.IndirectDouble(M_PI);
  //   auto id = builder.LastValue();  // Remember where we stored it.
  //   .. more code goes here ..
  //   builder.ReuseValue(id);  // Refers to the same double by offset.
  // LastValue works regardless of whether the value has a key or not.
  // Works on any data type.
  struct Value;
  Value LastValue() { return stack_.back(); }
  void ReuseValue(Value v) { stack_.push_back(v); }
  void ReuseValue(const char *key, Value v) {
    Key(key);
    ReuseValue(v);
  }

  // Undo the last element serialized. Call once for a value and once for a
  // key.
  void Undo() { stack_.pop_back(); }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) { Add(t); }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }
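
  // Example (a sketch; "counters" is a hypothetical key): reserve 32 bits
  // for every element of one vector so they can be mutated later:
  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_32);
  //   fbb.TypedVector("counters", [&]() { fbb.UInt(0); fbb.UInt(0); });
  //   fbb.ForceMinimumBitWidth();  // Back to the default.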

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check that your Start/End calls are matched, and that all objects are
    // inside some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no
    // parent :)
    Write(byte_width, 1);

    finished_ = true;
  }
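
  // Typical usage end to end (a sketch; field names are illustrative):
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() {
  //     fbb.String("name", "Fred");
  //     fbb.Int("age", 42);
  //   });
  //   fbb.Finish();
  //   auto root = flexbuffers::GetRoot(fbb.GetBuffer()).AsMap();
  //   // root["age"].AsInt32() == 42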
1420
1421 private:
Finished()1422 void Finished() const {
1423 // If you get this assert, you're attempting to get access a buffer
1424 // which hasn't been finished yet. Be sure to call
1425 // Builder::Finish with your root object.
1426 FLATBUFFERS_ASSERT(finished_);
1427 }
1428
1429 // Align to prepare for writing a scalar with a certain size.
Align(BitWidth alignment)1430 uint8_t Align(BitWidth alignment) {
1431 auto byte_width = 1U << alignment;
1432 buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
1433 0);
1434 return static_cast<uint8_t>(byte_width);
1435 }
1436
WriteBytes(const void * val,size_t size)1437 void WriteBytes(const void *val, size_t size) {
1438 buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
1439 reinterpret_cast<const uint8_t *>(val) + size);
1440 }
1441
Write(T val,size_t byte_width)1442 template<typename T> void Write(T val, size_t byte_width) {
1443 FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
1444 val = flatbuffers::EndianScalar(val);
1445 WriteBytes(&val, byte_width);
1446 }
1447
WriteDouble(double f,uint8_t byte_width)1448 void WriteDouble(double f, uint8_t byte_width) {
1449 switch (byte_width) {
1450 case 8: Write(f, byte_width); break;
1451 case 4: Write(static_cast<float>(f), byte_width); break;
1452 // case 2: Write(static_cast<half>(f), byte_width); break;
1453 // case 1: Write(static_cast<quarter>(f), byte_width); break;
1454 default: FLATBUFFERS_ASSERT(0);
1455 }
1456 }
1457
WriteOffset(uint64_t o,uint8_t byte_width)1458 void WriteOffset(uint64_t o, uint8_t byte_width) {
1459 auto reloff = buf_.size() - o;
1460 FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
1461 Write(reloff, byte_width);
1462 }
1463
PushIndirect(T val,Type type,BitWidth bit_width)1464 template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
1465 auto byte_width = Align(bit_width);
1466 auto iloc = buf_.size();
1467 Write(val, byte_width);
1468 stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
1469 }
1470
WidthB(size_t byte_width)1471 static BitWidth WidthB(size_t byte_width) {
1472 switch (byte_width) {
1473 case 1: return BIT_WIDTH_8;
1474 case 2: return BIT_WIDTH_16;
1475 case 4: return BIT_WIDTH_32;
1476 case 8: return BIT_WIDTH_64;
1477 default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
1478 }
1479 }
1480
GetScalarType()1481 template<typename T> static Type GetScalarType() {
1482 static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
1483 return flatbuffers::is_floating_point<T>::value ? FBT_FLOAT
1484 : flatbuffers::is_same<T, bool>::value
1485 ? FBT_BOOL
1486 : (flatbuffers::is_unsigned<T>::value ? FBT_UINT : FBT_INT);
1487 }

 public:
  // This was really intended to be private, except for LastValue/ReuseValue.
  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: the width of the value itself; for vectors: of its
    // elements; for strings: of the length field.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
        : u_(static_cast<uint64_t>(b)),
          type_(FBT_BOOL),
          min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
        : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
        : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
        : f_(static_cast<double>(f)),
          type_(FBT_FLOAT),
          min_bit_width_(BIT_WIDTH_32) {}
    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc = buf_size +
                            flatbuffers::PaddingBytes(buf_size, byte_width) +
                            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if ((static_cast<size_t>(1U) << bit_width) == byte_width)
            return bit_width;
        }
        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };
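  // A worked example of ElemWidth, as a sketch: suppose a string was written
  // at absolute offset 3 (u_ == 3) and buf_size is now 100. Trying byte_width
  // 1 first, the offset for elem_index 0 would be written at location 100, so
  // the relative offset is 97, which fits in one byte: BIT_WIDTH_8 is
  // returned. Had the target been 256 or more bytes back, the loop would move
  // on to byte_width 2, redoing the math including alignment padding.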

 private:
  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case FBT_NULL:
      case FBT_INT: Write(val.i_, byte_width); break;
      case FBT_BOOL:
      case FBT_UINT: Write(val.u_, byte_width); break;
      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
      default: WriteOffset(val.u_, byte_width); break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }
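  // Resulting layout, e.g. for CreateBlob("ab", 2, 1, FBT_STRING), a sketch of
  // what String() produces (the trailing byte copied is the source string's
  // NUL terminator):
  //
  //   | 02 | 'a' | 'b' | 00 |
  //    ^len  ^sloc points here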

  template<typename T>
  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, we could write the vector with larger
    // elements, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    Align(bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }
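  // A sketch of the layout produced above (little-endian): for
  // int16_t elems[] = { 1, 2, 3 } and fixed == false, the buffer gains,
  // after alignment to 2 bytes:
  //
  //   | 03 00 | 01 00 | 02 00 | 03 00 |
  //    ^len    ^vloc, pushed as FBT_VECTOR_INT at BIT_WIDTH_16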

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    FLATBUFFERS_ASSERT(
        !fixed ||
        typed);  // typed=false, fixed=true combination is not supported.
    // Figure out the smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will prefix it with an offset to
      // the keys vector (and the byte width of its elements).
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width =
          stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your typed elements are not one of:
    // Int / UInt / Float / Key / String / Bool.
    FLATBUFFERS_ASSERT(!typed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys offset/width if available, then the size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                               : FBT_VECTOR),
                 bit_width);
  }
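  // For a map such as { "a": 1 } this produces, as a sketch with all fields
  // one byte wide:
  //
  //   ...keys vector... | keys_off | keys_bw | 01 | 01 | type |
  //                                            ^len ^vloc (values)
  //
  // i.e. an untyped vector of the values, prefixed by a reference to the
  // sorted keys vector, with the packed element types trailing it.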

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;
  bool has_duplicate_keys_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra = reinterpret_cast<const char *>(buf_->data() + a);
      auto strb = reinterpret_cast<const char *>(buf_->data() + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
        : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = buf_->data() + a.first;
      auto strb = buf_->data() + b.first;
      auto cr = memcmp(stra, strb, (std::min)(a.second, b.second) + 1);
      return cr < 0 || (cr == 0 && a.second < b.second);
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;

  friend class Verifier;
};

// Helper class to verify the integrity of a FlexBuffer.
class Verifier FLATBUFFERS_FINAL_CLASS {
 public:
  Verifier(const uint8_t *buf, size_t buf_len,
           // Supplying this vector likely results in faster verification
           // of larger buffers with many shared keys/strings, but
           // comes at the cost of using additional memory the same size as
           // the buffer being verified, so it is off by default.
           std::vector<uint8_t> *reuse_tracker = nullptr,
           bool _check_alignment = true, size_t max_depth = 64)
      : buf_(buf),
        size_(buf_len),
        depth_(0),
        max_depth_(max_depth),
        num_vectors_(0),
        max_vectors_(buf_len),
        check_alignment_(_check_alignment),
        reuse_tracker_(reuse_tracker) {
    FLATBUFFERS_ASSERT(static_cast<int32_t>(size_) <
                       FLATBUFFERS_MAX_BUFFER_SIZE);
    if (reuse_tracker_) {
      reuse_tracker_->clear();
      reuse_tracker_->resize(size_, PackedType(BIT_WIDTH_8, FBT_NULL));
    }
  }
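  // Typical usage, as a sketch (the reuse_tracker is optional; it simply
  // memoizes which buffer locations have already been verified at which
  // packed type):
  //
  //   std::vector<uint8_t> reuse_tracker;
  //   flexbuffers::Verifier verifier(buf, len, &reuse_tracker);
  //   bool ok = verifier.VerifyBuffer();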

 private:
  // Central location where any verification failures register.
  bool Check(bool ok) const {
    // clang-format off
    #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
      FLATBUFFERS_ASSERT(ok);
    #endif
    // clang-format on
    return ok;
  }

  // Verify any range within the buffer.
  bool VerifyFrom(size_t elem, size_t elem_len) const {
    return Check(elem_len < size_ && elem <= size_ - elem_len);
  }
  bool VerifyBefore(size_t elem, size_t elem_len) const {
    return Check(elem_len <= elem);
  }

  bool VerifyFromPointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return VerifyFrom(o, len);
  }
  bool VerifyBeforePointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return VerifyBefore(o, len);
  }

  bool VerifyByteWidth(size_t width) {
    return Check(width == 1 || width == 2 || width == 4 || width == 8);
  }

  bool VerifyType(int type) { return Check(type >= 0 && type < FBT_MAX_TYPE); }

  bool VerifyOffset(uint64_t off, const uint8_t *p) {
    return Check(off <= static_cast<uint64_t>(size_)) &&
           off <= static_cast<uint64_t>(p - buf_);
  }

  bool VerifyAlignment(const uint8_t *p, size_t size) const {
    auto o = static_cast<size_t>(p - buf_);
    return Check((o & (size - 1)) == 0 || !check_alignment_);
  }

  // Macro, since we want to escape from the parent function & use lazy args.
#define FLEX_CHECK_VERIFIED(P, PACKED_TYPE)                      \
  if (reuse_tracker_) {                                          \
    auto packed_type = PACKED_TYPE;                              \
    auto existing = (*reuse_tracker_)[P - buf_];                 \
    if (existing == packed_type) return true;                    \
    /* Fail verification if already set with different type! */ \
    if (!Check(existing == 0)) return false;                     \
    (*reuse_tracker_)[P - buf_] = packed_type;                   \
  }

  bool VerifyVector(Reference r, const uint8_t *p, Type elem_type) {
    // Any kind of nesting goes through this function, so guard against that
    // here, both with simple nesting checks, and the reuse tracker if on.
    depth_++;
    num_vectors_++;
    if (!Check(depth_ <= max_depth_ && num_vectors_ <= max_vectors_))
      return false;
    auto size_byte_width = r.byte_width_;
    if (!VerifyBeforePointer(p, size_byte_width)) return false;
    FLEX_CHECK_VERIFIED(p - size_byte_width,
                        PackedType(Builder::WidthB(size_byte_width), r.type_));
    auto sized = Sized(p, size_byte_width);
    auto num_elems = sized.size();
    auto elem_byte_width = r.type_ == FBT_STRING || r.type_ == FBT_BLOB
                               ? uint8_t(1)
                               : r.byte_width_;
    auto max_elems = SIZE_MAX / elem_byte_width;
    if (!Check(num_elems < max_elems))
      return false;  // Protect against byte_size overflowing.
    auto byte_size = num_elems * elem_byte_width;
    if (!VerifyFromPointer(p, byte_size)) return false;
    if (elem_type == FBT_NULL) {
      // Verify the type bytes after the vector.
      if (!VerifyFromPointer(p + byte_size, num_elems)) return false;
      auto v = Vector(p, size_byte_width);
      for (size_t i = 0; i < num_elems; i++)
        if (!VerifyRef(v[i])) return false;
    } else if (elem_type == FBT_KEY) {
      auto v = TypedVector(p, elem_byte_width, FBT_KEY);
      for (size_t i = 0; i < num_elems; i++)
        if (!VerifyRef(v[i])) return false;
    } else {
      FLATBUFFERS_ASSERT(IsInline(elem_type));
    }
    depth_--;
    return true;
  }

  bool VerifyKeys(const uint8_t *p, uint8_t byte_width) {
    // The vector part of the map has already been verified.
    const size_t num_prefixed_fields = 3;
    if (!VerifyBeforePointer(p, byte_width * num_prefixed_fields)) return false;
    p -= byte_width * num_prefixed_fields;
    auto off = ReadUInt64(p, byte_width);
    if (!VerifyOffset(off, p)) return false;
    auto key_byte_width =
        static_cast<uint8_t>(ReadUInt64(p + byte_width, byte_width));
    if (!VerifyByteWidth(key_byte_width)) return false;
    return VerifyVector(
        Reference(p, byte_width, key_byte_width, FBT_VECTOR_KEY), p - off,
        FBT_KEY);
  }

  bool VerifyKey(const uint8_t *p) {
    FLEX_CHECK_VERIFIED(p, PackedType(BIT_WIDTH_8, FBT_KEY));
    // A key verifies if its NUL terminator lies within the buffer.
    while (p < buf_ + size_)
      if (*p++ == 0) return true;
    return false;
  }

#undef FLEX_CHECK_VERIFIED

  bool VerifyTerminator(const String &s) {
    return VerifyFromPointer(reinterpret_cast<const uint8_t *>(s.c_str()),
                             s.size() + 1);
  }

  bool VerifyRef(Reference r) {
    // r.parent_width_ and r.data_ already verified.
    if (!VerifyByteWidth(r.byte_width_) || !VerifyType(r.type_)) {
      return false;
    }
    if (IsInline(r.type_)) {
      // Inline scalars don't require further verification.
      return true;
    }
    // All remaining types are an offset.
    auto off = ReadUInt64(r.data_, r.parent_width_);
    if (!VerifyOffset(off, r.data_)) return false;
    auto p = r.Indirect();
    if (!VerifyAlignment(p, r.byte_width_)) return false;
    switch (r.type_) {
      case FBT_INDIRECT_INT:
      case FBT_INDIRECT_UINT:
      case FBT_INDIRECT_FLOAT: return VerifyFromPointer(p, r.byte_width_);
      case FBT_KEY: return VerifyKey(p);
      case FBT_MAP:
        return VerifyVector(r, p, FBT_NULL) && VerifyKeys(p, r.byte_width_);
      case FBT_VECTOR: return VerifyVector(r, p, FBT_NULL);
      case FBT_VECTOR_INT: return VerifyVector(r, p, FBT_INT);
      case FBT_VECTOR_BOOL:
      case FBT_VECTOR_UINT: return VerifyVector(r, p, FBT_UINT);
      case FBT_VECTOR_FLOAT: return VerifyVector(r, p, FBT_FLOAT);
      case FBT_VECTOR_KEY: return VerifyVector(r, p, FBT_KEY);
      case FBT_VECTOR_STRING_DEPRECATED:
        // Use of FBT_KEY here is intentional, see elsewhere.
        return VerifyVector(r, p, FBT_KEY);
      case FBT_BLOB: return VerifyVector(r, p, FBT_UINT);
      case FBT_STRING:
        return VerifyVector(r, p, FBT_UINT) &&
               VerifyTerminator(String(p, r.byte_width_));
      case FBT_VECTOR_INT2:
      case FBT_VECTOR_UINT2:
      case FBT_VECTOR_FLOAT2:
      case FBT_VECTOR_INT3:
      case FBT_VECTOR_UINT3:
      case FBT_VECTOR_FLOAT3:
      case FBT_VECTOR_INT4:
      case FBT_VECTOR_UINT4:
      case FBT_VECTOR_FLOAT4: {
        uint8_t len = 0;
        auto vtype = ToFixedTypedVectorElementType(r.type_, &len);
        if (!VerifyType(vtype)) return false;
        return VerifyFromPointer(p, static_cast<size_t>(r.byte_width_) * len);
      }
      default: return false;
    }
  }

 public:
  bool VerifyBuffer() {
    if (!Check(size_ >= 3)) return false;
    auto end = buf_ + size_;
    auto byte_width = *--end;
    auto packed_type = *--end;
    return VerifyByteWidth(byte_width) && Check(end - buf_ >= byte_width) &&
           VerifyRef(Reference(end - byte_width, byte_width, packed_type));
  }
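  // The last two bytes of every FlexBuffer are the root's packed type and the
  // root byte width. A sketch: the 3-byte buffer holding the single UInt 100
  // is
  //
  //   | 64 | 08 | 01 |
  //    ^root ^packed_type (FBT_UINT << 2 | BIT_WIDTH_8)  ^byte_width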

 private:
  const uint8_t *buf_;
  size_t size_;
  size_t depth_;
  const size_t max_depth_;
  size_t num_vectors_;
  const size_t max_vectors_;
  bool check_alignment_;
  std::vector<uint8_t> *reuse_tracker_;
};

// Utility function that constructs the Verifier for you, see above for
// parameters.
inline bool VerifyBuffer(const uint8_t *buf, size_t buf_len,
                         std::vector<uint8_t> *reuse_tracker = nullptr) {
  Verifier verifier(buf, buf_len, reuse_tracker);
  return verifier.VerifyBuffer();
}
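// E.g., verifying untrusted data before accessing it, a sketch assuming the
// GetRoot() accessor defined earlier in this header:
//
//   if (flexbuffers::VerifyBuffer(data, size)) {
//     auto root = flexbuffers::GetRoot(data, size);
//   }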

}  // namespace flexbuffers

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_