/*
 * Copyright 2015 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flatbuffers/reflection.h"
#include "flatbuffers/util.h"

// Helper functionality for reflection.

namespace flatbuffers {

int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data) {
# define FLATBUFFERS_GET(T) static_cast<int64_t>(ReadScalar<T>(data))
  switch (type) {
    case reflection::UType:
    case reflection::Bool:
    case reflection::UByte:  return FLATBUFFERS_GET(uint8_t);
    case reflection::Byte:   return FLATBUFFERS_GET(int8_t);
    case reflection::Short:  return FLATBUFFERS_GET(int16_t);
    case reflection::UShort: return FLATBUFFERS_GET(uint16_t);
    case reflection::Int:    return FLATBUFFERS_GET(int32_t);
    case reflection::UInt:   return FLATBUFFERS_GET(uint32_t);
    case reflection::Long:   return FLATBUFFERS_GET(int64_t);
    case reflection::ULong:  return FLATBUFFERS_GET(uint64_t);
    case reflection::Float:  return FLATBUFFERS_GET(float);
    case reflection::Double: return FLATBUFFERS_GET(double);
    case reflection::String: {
      auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
                                                data);
      return s ? StringToInt(s->c_str()) : 0;
    }
    default: return 0;  // Tables & vectors do not make sense.
  }
# undef FLATBUFFERS_GET
}

double GetAnyValueF(reflection::BaseType type, const uint8_t *data) {
  switch (type) {
    case reflection::Float:
      return static_cast<double>(ReadScalar<float>(data));
    case reflection::Double: return ReadScalar<double>(data);
    case reflection::String: {
      auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
                                                data);
      return s ? strtod(s->c_str(), nullptr) : 0.0;
    }
    default: return static_cast<double>(GetAnyValueI(type, data));
  }
}

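// For illustration, GetAnyValueS on a table field may produce something like
//   MyTable { first_field: 123, second_field: "hello", }
// (hypothetical schema); as noted below, this is debugging output and is not
// guaranteed to be valid JSON.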
std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
                         const reflection::Schema *schema, int type_index) {
  switch (type) {
    case reflection::Float:
    case reflection::Double: return NumToString(GetAnyValueF(type, data));
    case reflection::String: {
      auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
                                                data);
      return s ? s->c_str() : "";
    }
    case reflection::Obj:
      if (schema) {
        // Convert the table to a string. This is mostly for debugging
        // purposes, and does NOT promise to be JSON compliant.
        // Also prefixes the type.
        auto &objectdef = *schema->objects()->Get(type_index);
        auto s = objectdef.name()->str();
        if (objectdef.is_struct()) {
          s += "(struct)";  // TODO: implement this as well.
        } else {
          auto table_field = reinterpret_cast<const Table *>(
              ReadScalar<uoffset_t>(data) + data);
          s += " { ";
          auto fielddefs = objectdef.fields();
          for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
            auto &fielddef = **it;
            if (!table_field->CheckField(fielddef.offset())) continue;
            auto val = GetAnyFieldS(*table_field, fielddef, schema);
            if (fielddef.type()->base_type() == reflection::String)
              val = "\"" + val + "\"";  // Doesn't deal with escape codes etc.
            s += fielddef.name()->str();
            s += ": ";
            s += val;
            s += ", ";
          }
          s += "}";
        }
        return s;
      } else {
        return "(table)";
      }
    case reflection::Vector:
      return "[(elements)]";  // TODO: implement this as well.
    case reflection::Union:
      return "(union)";  // TODO: implement this as well.
    default: return NumToString(GetAnyValueI(type, data));
  }
}

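// A minimal usage sketch for the setters below (assumes `field_ptr` points
// at a mutable int16 field inside a buffer; the name is hypothetical):
//   SetAnyValueI(reflection::Short, field_ptr, 123);  // writes int16_t 123
//   SetAnyValueF(reflection::Short, field_ptr, 1.9);  // truncates to 1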
void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val) {
# define FLATBUFFERS_SET(T) WriteScalar(data, static_cast<T>(val))
  switch (type) {
    case reflection::UType:
    case reflection::Bool:
    case reflection::UByte:  FLATBUFFERS_SET(uint8_t);  break;
    case reflection::Byte:   FLATBUFFERS_SET(int8_t);   break;
    case reflection::Short:  FLATBUFFERS_SET(int16_t);  break;
    case reflection::UShort: FLATBUFFERS_SET(uint16_t); break;
    case reflection::Int:    FLATBUFFERS_SET(int32_t);  break;
    case reflection::UInt:   FLATBUFFERS_SET(uint32_t); break;
    case reflection::Long:   FLATBUFFERS_SET(int64_t);  break;
    case reflection::ULong:  FLATBUFFERS_SET(uint64_t); break;
    case reflection::Float:  FLATBUFFERS_SET(float);    break;
    case reflection::Double: FLATBUFFERS_SET(double);   break;
    // TODO: support strings.
    default: break;
  }
# undef FLATBUFFERS_SET
}

void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val) {
  switch (type) {
    case reflection::Float:  WriteScalar(data, static_cast<float>(val)); break;
    case reflection::Double: WriteScalar(data, val); break;
    // TODO: support strings.
    default: SetAnyValueI(type, data, static_cast<int64_t>(val)); break;
  }
}

void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val) {
  switch (type) {
    case reflection::Float:
    case reflection::Double:
      SetAnyValueF(type, data, strtod(val, nullptr));
      break;
    // TODO: support strings.
    default: SetAnyValueI(type, data, StringToInt(val)); break;
  }
}

// Resize a FlatBuffer in-place by iterating through all offsets in the buffer
// and adjusting them by "delta" if they straddle the start offset.
// Once that is done, bytes can now be inserted/deleted safely.
// "delta" may be negative (shrinking).
// Unless "delta" is a multiple of the largest alignment, you'll create a small
// amount of garbage space in the buffer (usually 0..7 bytes).
// If your FlatBuffer's root table is not the schema's root table, you should
// pass in your root_table type as well.
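// A minimal sketch of how this is used internally (see SetString below;
// `schema`, `flatbuf`, `start` and `delta` are assumed to describe a valid
// buffer and insertion point):
//   // All work happens in the constructor:
//   ResizeContext(schema, start, delta, &flatbuf);
//   // flatbuf now contains `delta` (rounded up to a multiple of 8) zero
//   // bytes at offset `start`, with all straddling offsets fixed up.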
class ResizeContext {
 public:
  ResizeContext(const reflection::Schema &schema, uoffset_t start, int delta,
                std::vector<uint8_t> *flatbuf,
                const reflection::Object *root_table = nullptr)
      : schema_(schema), startptr_(flatbuf->data() + start),
        delta_(delta), buf_(*flatbuf),
        dag_check_(flatbuf->size() / sizeof(uoffset_t), false) {
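    // Round delta_ up to a multiple of sizeof(largest_scalar_t) (e.g. a
    // delta of 3 becomes 8) so the alignment of everything following the
    // insertion point is preserved.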
    auto mask = static_cast<int>(sizeof(largest_scalar_t) - 1);
    delta_ = (delta_ + mask) & ~mask;
    if (!delta_) return;  // We can't shrink by less than largest_scalar_t.
    // Now change all the offsets by delta_.
    auto root = GetAnyRoot(buf_.data());
    Straddle<uoffset_t, 1>(buf_.data(), root, buf_.data());
    ResizeTable(root_table ? *root_table : *schema.root_table(), root);
    // We can now add or remove bytes at start.
    if (delta_ > 0) buf_.insert(buf_.begin() + start, delta_, 0);
    else buf_.erase(buf_.begin() + start, buf_.begin() + start - delta_);
  }

  // Check if the range between first (lower address) and second straddles
  // the insertion point. If it does, change the offset at offsetloc (of
  // type T, with direction D).
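  // For example, inserting bytes between a field's offset location and the
  // data it points to stretches that range, so a stored uoffset_t grows by
  // delta_ (D = 1); the soffset_t between a table and its vtable has the
  // opposite sign convention, hence D = -1 there.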
  template<typename T, int D> void Straddle(const void *first,
                                            const void *second,
                                            void *offsetloc) {
    if (first <= startptr_ && second >= startptr_) {
      WriteScalar<T>(offsetloc, ReadScalar<T>(offsetloc) + delta_ * D);
      DagCheck(offsetloc) = true;
    }
  }

  // This returns a boolean that records if the corresponding offset location
  // has been modified already. If so, we can't even read the corresponding
  // offset, since it is pointing to a location that is illegal until the
  // resize actually happens.
  // This must be checked for every offset, since we can't know which offsets
  // will straddle and which won't.
  uint8_t &DagCheck(const void *offsetloc) {
    auto dag_idx = reinterpret_cast<const uoffset_t *>(offsetloc) -
                   reinterpret_cast<const uoffset_t *>(buf_.data());
    return dag_check_[dag_idx];
  }

  void ResizeTable(const reflection::Object &objectdef, Table *table) {
    if (DagCheck(table))
      return;  // Table already visited.
    auto vtable = table->GetVTable();
    // Early out: since all fields inside the table must point forwards in
    // memory, if the insertion point is before the table we can stop here.
    auto tableloc = reinterpret_cast<uint8_t *>(table);
    if (startptr_ <= tableloc) {
      // Check if insertion point is between the table and a vtable that
      // precedes it. This can't happen in current construction code, but check
      // just in case we ever change the way flatbuffers are built.
      Straddle<soffset_t, -1>(vtable, table, table);
    } else {
      // Check each field.
      auto fielddefs = objectdef.fields();
      for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
        auto &fielddef = **it;
        auto base_type = fielddef.type()->base_type();
        // Ignore scalars.
        if (base_type <= reflection::Double) continue;
        // Ignore fields that are not stored.
        auto offset = table->GetOptionalFieldOffset(fielddef.offset());
        if (!offset) continue;
        // Ignore structs.
        auto subobjectdef = base_type == reflection::Obj ?
            schema_.objects()->Get(fielddef.type()->index()) : nullptr;
        if (subobjectdef && subobjectdef->is_struct()) continue;
        // Get this field's offset, and read it if safe.
        auto offsetloc = tableloc + offset;
        if (DagCheck(offsetloc))
          continue;  // This offset already visited.
        auto ref = offsetloc + ReadScalar<uoffset_t>(offsetloc);
        Straddle<uoffset_t, 1>(offsetloc, ref, offsetloc);
        // Recurse.
        switch (base_type) {
          case reflection::Obj: {
            ResizeTable(*subobjectdef, reinterpret_cast<Table *>(ref));
            break;
          }
          case reflection::Vector: {
            auto elem_type = fielddef.type()->element();
            if (elem_type != reflection::Obj && elem_type != reflection::String)
              break;
            auto vec = reinterpret_cast<Vector<uoffset_t> *>(ref);
            auto elemobjectdef = elem_type == reflection::Obj
                ? schema_.objects()->Get(fielddef.type()->index())
                : nullptr;
            if (elemobjectdef && elemobjectdef->is_struct()) break;
            for (uoffset_t i = 0; i < vec->size(); i++) {
              auto loc = vec->Data() + i * sizeof(uoffset_t);
              if (DagCheck(loc))
                continue;  // This offset already visited.
              auto dest = loc + vec->Get(i);
              Straddle<uoffset_t, 1>(loc, dest, loc);
              if (elemobjectdef)
                ResizeTable(*elemobjectdef, reinterpret_cast<Table *>(dest));
            }
            break;
          }
          case reflection::Union: {
            ResizeTable(GetUnionType(schema_, objectdef, fielddef, *table),
                        reinterpret_cast<Table *>(ref));
            break;
          }
          case reflection::String:
            break;
          default:
            assert(false);
        }
      }
      // Check if the vtable offset points beyond the insertion point.
      // Must do this last, since GetOptionalFieldOffset above still reads
      // this value.
      Straddle<soffset_t, -1>(table, vtable, table);
    }
  }

  void operator=(const ResizeContext &rc);

 private:
  const reflection::Schema &schema_;
  uint8_t *startptr_;
  int delta_;
  std::vector<uint8_t> &buf_;
  std::vector<uint8_t> dag_check_;
};

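// Example usage of SetString below (a sketch; assumes `schema` describes the
// buffer in `flatbuf`, and `str` was obtained from that same buffer, e.g.
// via GetFieldS on its root table):
//   SetString(schema, "longer replacement", str, &flatbuf, nullptr);
//   // flatbuf has been resized in-place; all other offsets remain valid.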
void SetString(const reflection::Schema &schema, const std::string &val,
               const String *str, std::vector<uint8_t> *flatbuf,
               const reflection::Object *root_table) {
  auto delta = static_cast<int>(val.size()) - static_cast<int>(str->Length());
  auto str_start = static_cast<uoffset_t>(
      reinterpret_cast<const uint8_t *>(str) - flatbuf->data());
  auto start = str_start + static_cast<uoffset_t>(sizeof(uoffset_t));
  if (delta) {
    // Clear the old string, since we don't want parts of it remaining.
    memset(flatbuf->data() + start, 0, str->Length());
    // Different size, we must expand (or contract).
    ResizeContext(schema, start, delta, flatbuf, root_table);
    // Set the new length.
    WriteScalar(flatbuf->data() + str_start,
                static_cast<uoffset_t>(val.size()));
  }
  // Copy new data. Safe because we created the right amount of space.
  memcpy(flatbuf->data() + start, val.c_str(), val.size() + 1);
}

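// Example usage of ResizeAnyVector below (a sketch; assumes `vec` is a
// vector of uint16 elements obtained from the buffer via the reflection
// API, with `num_elems` its current length):
//   auto data = ResizeAnyVector(schema, num_elems + 2, vec, num_elems,
//                               sizeof(uint16_t), &flatbuf, nullptr);
//   // `data` points just past the old elements; the new ones are zeroed.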
uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
                         const VectorOfAny *vec, uoffset_t num_elems,
                         uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
                         const reflection::Object *root_table) {
  auto delta_elem = static_cast<int>(newsize) - static_cast<int>(num_elems);
  auto delta_bytes = delta_elem * static_cast<int>(elem_size);
  auto vec_start = reinterpret_cast<const uint8_t *>(vec) - flatbuf->data();
  auto start = static_cast<uoffset_t>(vec_start + sizeof(uoffset_t) +
                                      elem_size * num_elems);
  if (delta_bytes) {
    if (delta_elem < 0) {
      // Clear elements we're throwing away, since some might remain in the
      // buffer.
      auto size_clear = -delta_elem * elem_size;
      memset(flatbuf->data() + start - size_clear, 0, size_clear);
    }
    ResizeContext(schema, start, delta_bytes, flatbuf, root_table);
    WriteScalar(flatbuf->data() + vec_start, newsize);  // Length field.
    // Set new elements to 0; the caller can overwrite them.
    if (delta_elem > 0) {
      memset(flatbuf->data() + start, 0, delta_elem * elem_size);
    }
  }
  return flatbuf->data() + start;
}

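// Example usage of AddFlatBuffer below (a sketch; assumes `fbb` holds a
// finished FlatBuffer to be grafted into `flatbuf`):
//   auto newroot = AddFlatBuffer(flatbuf, fbb.GetBufferPointer(),
//                                fbb.GetSize());
//   // `newroot` points at the root table of the copy inside `flatbuf`.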
const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
                             const uint8_t *newbuf, size_t newlen) {
  // Pad the buffer so its size is sizeof(uoffset_t) past a multiple of
  // sizeof(largest_scalar_t): we're about to chop off the root offset, so
  // the copied data then keeps the alignment it had in its source buffer.
  while ((flatbuf.size() & (sizeof(uoffset_t) - 1)) ||
         !(flatbuf.size() & (sizeof(largest_scalar_t) - 1))) {
    flatbuf.push_back(0);
  }
  auto insertion_point = static_cast<uoffset_t>(flatbuf.size());
  // Insert the entire FlatBuffer minus the root pointer.
  flatbuf.insert(flatbuf.end(), newbuf + sizeof(uoffset_t), newbuf + newlen);
  auto root_offset = ReadScalar<uoffset_t>(newbuf) - sizeof(uoffset_t);
  return flatbuf.data() + insertion_point + root_offset;
}

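// Copy the bytes of a struct or scalar field verbatim into the buffer under
// construction, preserving alignment, and track it as a field of the table
// being built.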
void CopyInline(FlatBufferBuilder &fbb, const reflection::Field &fielddef,
                const Table &table, size_t align, size_t size) {
  fbb.Align(align);
  fbb.PushBytes(table.GetStruct<const uint8_t *>(fielddef.offset()), size);
  fbb.TrackField(fielddef.offset(), fbb.GetSize());
}

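// Example usage of CopyTable below (a sketch; assumes `root` was obtained
// with GetAnyRoot from a buffer matching `schema`):
//   FlatBufferBuilder fbb;
//   auto root_offset = CopyTable(fbb, schema, *schema.root_table(), *root);
//   fbb.Finish(root_offset);
//   // fbb now holds a compact deep copy of the original root table.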
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
                                const reflection::Schema &schema,
                                const reflection::Object &objectdef,
                                const Table &table,
                                bool use_string_pooling) {
  // Before we can construct the table, we have to first generate any
  // subobjects, and collect their offsets.
  std::vector<uoffset_t> offsets;
  auto fielddefs = objectdef.fields();
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    // Skip if field is not present in the source.
    if (!table.CheckField(fielddef.offset())) continue;
    uoffset_t offset = 0;
    switch (fielddef.type()->base_type()) {
      case reflection::String: {
        offset = use_string_pooling
                 ? fbb.CreateSharedString(GetFieldS(table, fielddef)).o
                 : fbb.CreateString(GetFieldS(table, fielddef)).o;
        break;
      }
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (!subobjectdef.is_struct()) {
          // Pass use_string_pooling along so strings in subtables are
          // pooled as well.
          offset = CopyTable(fbb, schema, subobjectdef,
                             *GetFieldT(table, fielddef),
                             use_string_pooling).o;
        }
        break;
      }
      case reflection::Union: {
        auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
        offset = CopyTable(fbb, schema, subobjectdef,
                           *GetFieldT(table, fielddef),
                           use_string_pooling).o;
        break;
      }
      case reflection::Vector: {
        auto vec = table.GetPointer<const Vector<Offset<Table>> *>(
            fielddef.offset());
        auto element_base_type = fielddef.type()->element();
        auto elemobjectdef = element_base_type == reflection::Obj
                             ? schema.objects()->Get(fielddef.type()->index())
                             : nullptr;
        switch (element_base_type) {
          case reflection::String: {
            std::vector<Offset<const String *>> elements(vec->size());
            auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
            for (uoffset_t i = 0; i < vec_s->size(); i++) {
              elements[i] = use_string_pooling
                            ? fbb.CreateSharedString(vec_s->Get(i)).o
                            : fbb.CreateString(vec_s->Get(i)).o;
            }
            offset = fbb.CreateVector(elements).o;
            break;
          }
          case reflection::Obj: {
            if (!elemobjectdef->is_struct()) {
              std::vector<Offset<const Table *>> elements(vec->size());
              for (uoffset_t i = 0; i < vec->size(); i++) {
                elements[i] = CopyTable(fbb, schema, *elemobjectdef,
                                        *vec->Get(i), use_string_pooling);
              }
              offset = fbb.CreateVector(elements).o;
              break;
            }
            // FALL-THRU: vectors of structs are copied inline below.
          }
          default: {  // Scalars and structs.
            auto element_size = GetTypeSize(element_base_type);
            if (elemobjectdef && elemobjectdef->is_struct())
              element_size = elemobjectdef->bytesize();
            fbb.StartVector(element_size, vec->size());
            fbb.PushBytes(vec->Data(), element_size * vec->size());
            offset = fbb.EndVector(vec->size());
            break;
          }
        }
        break;
      }
      default:  // Scalars.
        break;
    }
    if (offset) {
      offsets.push_back(offset);
    }
  }
  // Now we can build the actual table from either offsets or scalar data.
  auto start = objectdef.is_struct()
               ? fbb.StartStruct(objectdef.minalign())
               : fbb.StartTable();
  size_t offset_idx = 0;
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    if (!table.CheckField(fielddef.offset())) continue;
    auto base_type = fielddef.type()->base_type();
    switch (base_type) {
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (subobjectdef.is_struct()) {
          CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
                     subobjectdef.bytesize());
          break;
        }
        // else: FALL-THRU:
      }
      case reflection::Union:
      case reflection::String:
      case reflection::Vector:
        fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
        break;
      default: {  // Scalars.
        auto size = GetTypeSize(base_type);
        CopyInline(fbb, fielddef, table, size, size);
        break;
      }
    }
  }
  assert(offset_idx == offsets.size());
  if (objectdef.is_struct()) {
    fbb.ClearOffsets();
    return fbb.EndStruct();
  } else {
    return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
  }
}

bool VerifyStruct(flatbuffers::Verifier &v,
                  const flatbuffers::Table &parent_table,
                  voffset_t field_offset,
                  const reflection::Object &obj,
                  bool required) {
  auto offset = parent_table.GetOptionalFieldOffset(field_offset);
  if (required && !offset) {
    return false;
  }

  return !offset || v.Verify(reinterpret_cast<const uint8_t *>(&parent_table)
                             + offset, obj.bytesize());
}

bool VerifyVectorOfStructs(flatbuffers::Verifier &v,
                           const flatbuffers::Table &parent_table,
                           voffset_t field_offset,
                           const reflection::Object &obj,
                           bool required) {
  auto p = parent_table.GetPointer<const uint8_t *>(field_offset);
  const uint8_t *end;
  if (required && !p) {
    return false;
  }

  return !p || v.VerifyVector(p, obj.bytesize(), &end);
}

// Forward declaration to resolve the cyclic dependency between VerifyObject
// and VerifyVector.
bool VerifyObject(flatbuffers::Verifier &v,
                  const reflection::Schema &schema,
                  const reflection::Object &obj,
                  const flatbuffers::Table *table,
                  bool required);

bool VerifyVector(flatbuffers::Verifier &v,
                  const reflection::Schema &schema,
                  const flatbuffers::Table &table,
                  const reflection::Field &vec_field) {
  assert(vec_field.type()->base_type() == reflection::Vector);
  if (!table.VerifyField<uoffset_t>(v, vec_field.offset()))
    return false;

  switch (vec_field.type()->element()) {
    case reflection::None:
      assert(false);
      break;
    case reflection::UType:
      return v.Verify(flatbuffers::GetFieldV<uint8_t>(table, vec_field));
    case reflection::Bool:
    case reflection::Byte:
    case reflection::UByte:
      return v.Verify(flatbuffers::GetFieldV<int8_t>(table, vec_field));
    case reflection::Short:
    case reflection::UShort:
      return v.Verify(flatbuffers::GetFieldV<int16_t>(table, vec_field));
    case reflection::Int:
    case reflection::UInt:
      return v.Verify(flatbuffers::GetFieldV<int32_t>(table, vec_field));
    case reflection::Long:
    case reflection::ULong:
      return v.Verify(flatbuffers::GetFieldV<int64_t>(table, vec_field));
    case reflection::Float:
      return v.Verify(flatbuffers::GetFieldV<float>(table, vec_field));
    case reflection::Double:
      return v.Verify(flatbuffers::GetFieldV<double>(table, vec_field));
    case reflection::String: {
      auto vec_string = flatbuffers::GetFieldV<
          flatbuffers::Offset<flatbuffers::String>>(table, vec_field);
      return v.Verify(vec_string) && v.VerifyVectorOfStrings(vec_string);
    }
    case reflection::Vector:
      assert(false);
      break;
    case reflection::Obj: {
      auto obj = schema.objects()->Get(vec_field.type()->index());
      if (obj->is_struct()) {
        if (!VerifyVectorOfStructs(v, table, vec_field.offset(), *obj,
                                   vec_field.required())) {
          return false;
        }
      } else {
        auto vec = flatbuffers::GetFieldV<
            flatbuffers::Offset<flatbuffers::Table>>(table, vec_field);
        if (!v.Verify(vec))
          return false;
        if (vec) {
          for (uoffset_t j = 0; j < vec->size(); j++) {
            if (!VerifyObject(v, schema, *obj, vec->Get(j), true)) {
              return false;
            }
          }
        }
      }
      return true;
    }
    case reflection::Union:
      assert(false);
      break;
    default:
      assert(false);
      break;
  }

  return false;
}

bool VerifyObject(flatbuffers::Verifier &v,
                  const reflection::Schema &schema,
                  const reflection::Object &obj,
                  const flatbuffers::Table *table,
                  bool required) {
  if (!table) {
    return !required;
  }

  if (!table->VerifyTableStart(v))
    return false;

  for (uoffset_t i = 0; i < obj.fields()->size(); i++) {
    auto field_def = obj.fields()->Get(i);
    switch (field_def->type()->base_type()) {
      case reflection::None:
        assert(false);
        break;
      case reflection::UType:
        if (!table->VerifyField<uint8_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Bool:
      case reflection::Byte:
      case reflection::UByte:
        if (!table->VerifyField<int8_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Short:
      case reflection::UShort:
        if (!table->VerifyField<int16_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Int:
      case reflection::UInt:
        if (!table->VerifyField<int32_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Long:
      case reflection::ULong:
        if (!table->VerifyField<int64_t>(v, field_def->offset()))
          return false;
        break;
      case reflection::Float:
        if (!table->VerifyField<float>(v, field_def->offset()))
          return false;
        break;
      case reflection::Double:
        if (!table->VerifyField<double>(v, field_def->offset()))
          return false;
        break;
      case reflection::String:
        if (!table->VerifyField<uoffset_t>(v, field_def->offset()) ||
            !v.Verify(flatbuffers::GetFieldS(*table, *field_def))) {
          return false;
        }
        break;
      case reflection::Vector:
        if (!VerifyVector(v, schema, *table, *field_def))
          return false;
        break;
      case reflection::Obj: {
        auto child_obj = schema.objects()->Get(field_def->type()->index());
        if (child_obj->is_struct()) {
          if (!VerifyStruct(v, *table, field_def->offset(), *child_obj,
                            field_def->required())) {
            return false;
          }
        } else {
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      case reflection::Union: {
        // Get the union type from the type field, whose vtable offset
        // always immediately precedes the union field's.
        voffset_t utype_offset = field_def->offset() - sizeof(voffset_t);
        auto utype = table->GetField<uint8_t>(utype_offset, 0);
        if (utype != 0) {
          // A non-zero type value means this union field is present.
          auto fb_enum = schema.enums()->Get(field_def->type()->index());
          auto child_obj = fb_enum->values()->Get(utype)->object();
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      default:
        assert(false);
        break;
    }
  }

  return true;
}

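// Example usage of Verify below (a sketch; assumes `schema` was loaded from
// the matching binary schema (.bfbs) and `buf`/`length` hold the untrusted
// buffer):
//   bool ok = Verify(schema, *schema.root_table(), buf, length);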
bool Verify(const reflection::Schema &schema,
            const reflection::Object &root,
            const uint8_t *buf,
            size_t length) {
  Verifier v(buf, length);
  return VerifyObject(v, schema, root, flatbuffers::GetAnyRoot(buf), true);
}

}  // namespace flatbuffers