/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17 use crate::endian_scalar::read_scalar_at;
18 use crate::follow::Follow;
19 use crate::primitives::*;
20
/// VTable encapsulates read-only usage of a vtable. It is only to be used
/// by generated code.
#[derive(Debug)]
pub struct VTable<'a> {
    /// The FlatBuffer byte region containing this vtable.
    buf: &'a [u8],
    /// Byte offset of the start of the vtable within `buf`.
    loc: usize,
}
28
29 impl<'a> PartialEq for VTable<'a> {
eq(&self, other: &VTable) -> bool30 fn eq(&self, other: &VTable) -> bool {
31 self.as_bytes().eq(other.as_bytes())
32 }
33 }
34
35 impl<'a> VTable<'a> {
init(buf: &'a [u8], loc: usize) -> Self36 pub fn init(buf: &'a [u8], loc: usize) -> Self {
37 VTable { buf, loc }
38 }
num_fields(&self) -> usize39 pub fn num_fields(&self) -> usize {
40 (self.num_bytes() / SIZE_VOFFSET) - 2
41 }
num_bytes(&self) -> usize42 pub fn num_bytes(&self) -> usize {
43 unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc) as usize }
44 }
object_inline_num_bytes(&self) -> usize45 pub fn object_inline_num_bytes(&self) -> usize {
46 let n = unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc + SIZE_VOFFSET) };
47 n as usize
48 }
get_field(&self, idx: usize) -> VOffsetT49 pub fn get_field(&self, idx: usize) -> VOffsetT {
50 // TODO(rw): distinguish between None and 0?
51 if idx > self.num_fields() {
52 return 0;
53 }
54 unsafe {
55 read_scalar_at::<VOffsetT>(
56 self.buf,
57 self.loc + SIZE_VOFFSET + SIZE_VOFFSET + SIZE_VOFFSET * idx,
58 )
59 }
60 }
get(&self, byte_loc: VOffsetT) -> VOffsetT61 pub fn get(&self, byte_loc: VOffsetT) -> VOffsetT {
62 // TODO(rw): distinguish between None and 0?
63 if byte_loc as usize >= self.num_bytes() {
64 return 0;
65 }
66 unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc + byte_loc as usize) }
67 }
as_bytes(&self) -> &[u8]68 pub fn as_bytes(&self) -> &[u8] {
69 let len = self.num_bytes();
70 &self.buf[self.loc..self.loc + len]
71 }
72 }
73
74 #[allow(dead_code)]
field_index_to_field_offset(field_id: VOffsetT) -> VOffsetT75 pub fn field_index_to_field_offset(field_id: VOffsetT) -> VOffsetT {
76 // Should correspond to what end_table() below builds up.
77 let fixed_fields = 2; // Vtable size and Object Size.
78 ((field_id + fixed_fields) * (SIZE_VOFFSET as VOffsetT)) as VOffsetT
79 }
80
81 #[allow(dead_code)]
field_offset_to_field_index(field_o: VOffsetT) -> VOffsetT82 pub fn field_offset_to_field_index(field_o: VOffsetT) -> VOffsetT {
83 debug_assert!(field_o >= 2);
84 let fixed_fields = 2; // VTable size and Object Size.
85 (field_o / (SIZE_VOFFSET as VOffsetT)) - fixed_fields
86 }
87
88 impl<'a> Follow<'a> for VTable<'a> {
89 type Inner = VTable<'a>;
follow(buf: &'a [u8], loc: usize) -> Self::Inner90 fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
91 VTable::init(buf, loc)
92 }
93 }
94