• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/objects/layout-descriptor.h"
6 
7 #include <sstream>
8 
9 #include "src/base/bits.h"
10 #include "src/handles/handles-inl.h"
11 #include "src/objects/objects-inl.h"
12 
13 namespace v8 {
14 namespace internal {
15 
New(Isolate * isolate,Handle<Map> map,Handle<DescriptorArray> descriptors,int num_descriptors)16 Handle<LayoutDescriptor> LayoutDescriptor::New(
17     Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
18     int num_descriptors) {
19   if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
20 
21   int layout_descriptor_length =
22       CalculateCapacity(*map, *descriptors, num_descriptors);
23 
24   if (layout_descriptor_length == 0) {
25     // No double fields were found, use fast pointer layout.
26     return handle(FastPointerLayout(), isolate);
27   }
28 
29   // Initially, layout descriptor corresponds to an object with all fields
30   // tagged.
31   Handle<LayoutDescriptor> layout_descriptor_handle =
32       LayoutDescriptor::New(isolate, layout_descriptor_length);
33 
34   LayoutDescriptor layout_descriptor = Initialize(
35       *layout_descriptor_handle, *map, *descriptors, num_descriptors);
36 
37   return handle(layout_descriptor, isolate);
38 }
39 
ShareAppend(Isolate * isolate,Handle<Map> map,PropertyDetails details)40 Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
41     Isolate* isolate, Handle<Map> map, PropertyDetails details) {
42   DCHECK(map->owns_descriptors());
43   Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
44                                              isolate);
45 
46   if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
47     DCHECK(details.location() != kField ||
48            layout_descriptor->IsTagged(details.field_index()));
49     return layout_descriptor;
50   }
51   int field_index = details.field_index();
52   layout_descriptor = LayoutDescriptor::EnsureCapacity(
53       isolate, layout_descriptor, field_index + details.field_width_in_words());
54 
55   DisallowHeapAllocation no_allocation;
56   LayoutDescriptor layout_desc = *layout_descriptor;
57   layout_desc = layout_desc.SetRawData(field_index);
58   if (details.field_width_in_words() > 1) {
59     layout_desc = layout_desc.SetRawData(field_index + 1);
60   }
61   return handle(layout_desc, isolate);
62 }
63 
AppendIfFastOrUseFull(Isolate * isolate,Handle<Map> map,PropertyDetails details,Handle<LayoutDescriptor> full_layout_descriptor)64 Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
65     Isolate* isolate, Handle<Map> map, PropertyDetails details,
66     Handle<LayoutDescriptor> full_layout_descriptor) {
67   DisallowHeapAllocation no_allocation;
68   LayoutDescriptor layout_descriptor = map->layout_descriptor(kAcquireLoad);
69   if (layout_descriptor.IsSlowLayout()) {
70     return full_layout_descriptor;
71   }
72   if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
73     DCHECK(details.location() != kField ||
74            layout_descriptor.IsTagged(details.field_index()));
75     return handle(layout_descriptor, isolate);
76   }
77   int field_index = details.field_index();
78   int new_capacity = field_index + details.field_width_in_words();
79   if (new_capacity > layout_descriptor.capacity()) {
80     // Current map's layout descriptor runs out of space, so use the full
81     // layout descriptor.
82     return full_layout_descriptor;
83   }
84 
85   layout_descriptor = layout_descriptor.SetRawData(field_index);
86   if (details.field_width_in_words() > 1) {
87     layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
88   }
89   return handle(layout_descriptor, isolate);
90 }
91 
EnsureCapacity(Isolate * isolate,Handle<LayoutDescriptor> layout_descriptor,int new_capacity)92 Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
93     Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
94     int new_capacity) {
95   int old_capacity = layout_descriptor->capacity();
96   if (new_capacity <= old_capacity) {
97     return layout_descriptor;
98   }
99   Handle<LayoutDescriptor> new_layout_descriptor =
100       LayoutDescriptor::New(isolate, new_capacity);
101   DCHECK(new_layout_descriptor->IsSlowLayout());
102 
103   if (layout_descriptor->IsSlowLayout()) {
104     memcpy(new_layout_descriptor->GetDataStartAddress(),
105            layout_descriptor->GetDataStartAddress(),
106            layout_descriptor->DataSize());
107     return new_layout_descriptor;
108   } else {
109     // Fast layout.
110     uint32_t value = static_cast<uint32_t>(Smi::ToInt(*layout_descriptor));
111     new_layout_descriptor->set_layout_word(0, value);
112     return new_layout_descriptor;
113   }
114 }
115 
// Returns whether the field at |field_index| is tagged and, via
// |out_sequence_length| (capped at |max_sequence_length|), how many
// consecutive fields starting there share the same tagged/raw state.
// A cleared layout bit means "tagged"; a set bit means raw data.
bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
                                int* out_sequence_length) {
  DCHECK_GT(max_sequence_length, 0);
  if (IsFastPointerLayout()) {
    // Canonical all-tagged layout: every field is tagged.
    *out_sequence_length = max_sequence_length;
    return true;
  }

  int layout_word_index;
  int layout_bit_index;

  if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
    // Out of bounds queries are considered tagged.
    *out_sequence_length = max_sequence_length;
    return true;
  }
  uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;

  // Fast layouts keep all the bits in a Smi; slow layouts use a word array.
  uint32_t value = IsSlowLayout() ? get_layout_word(layout_word_index)
                                  : static_cast<uint32_t>(Smi::ToInt(*this));

  bool is_tagged = (value & layout_mask) == 0;
  if (!is_tagged) value = ~value;  // Count set bits instead of cleared bits.
  value = value & ~(layout_mask - 1);  // Clear bits we are not interested in.
  int sequence_length;
  if (IsSlowLayout()) {
    // Trailing zeros above |layout_bit_index| give the run length within
    // this word.
    sequence_length = base::bits::CountTrailingZeros(value) - layout_bit_index;

    if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
      // This is a contiguous sequence till the end of current word, proceed
      // counting in the subsequent words.
      ++layout_word_index;
      int num_words = number_of_layout_words();
      for (; layout_word_index < num_words; layout_word_index++) {
        value = get_layout_word(layout_word_index);
        bool cur_is_tagged = (value & 1) == 0;
        if (cur_is_tagged != is_tagged) break;
        if (!is_tagged) value = ~value;  // Count set bits instead.
        int cur_sequence_length = base::bits::CountTrailingZeros(value);
        sequence_length += cur_sequence_length;
        if (sequence_length >= max_sequence_length) break;
        // A partial word ends the run; a full word continues into the next.
        if (cur_sequence_length != kBitsPerLayoutWord) break;
      }
      if (is_tagged && (field_index + sequence_length == capacity())) {
        // The contiguous sequence of tagged fields lasts till the end of the
        // layout descriptor which means that all the fields starting from
        // field_index are tagged.
        sequence_length = std::numeric_limits<int>::max();
      }
    }
  } else {  // Fast layout.
    // Only kBitsInSmiLayout bits are meaningful in the Smi encoding.
    sequence_length = std::min(base::bits::CountTrailingZeros(value),
                               static_cast<unsigned>(kBitsInSmiLayout)) -
                      layout_bit_index;
    if (is_tagged && (field_index + sequence_length == capacity())) {
      // The contiguous sequence of tagged fields lasts till the end of the
      // layout descriptor which means that all the fields starting from
      // field_index are tagged.
      sequence_length = std::numeric_limits<int>::max();
    }
  }
  *out_sequence_length = std::min(sequence_length, max_sequence_length);
  return is_tagged;
}
180 
// Test-only wrapper around the allocating New(isolate, length) overload.
Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
                                                         int length) {
  return New(isolate, length);
}
185 
// Test-only wrapper around SetTagged; returns the updated descriptor.
LayoutDescriptor LayoutDescriptor::SetTaggedForTesting(int field_index,
                                                       bool tagged) {
  return SetTagged(field_index, tagged);
}
190 
// Returns whether the object region starting at byte |offset_in_bytes| is
// tagged and writes the end offset (clamped to |end_offset|) of the
// contiguous same-kind region to |out_end_of_contiguous_region_offset|.
// Offsets inside the object header are always reported as tagged.
bool LayoutDescriptorHelper::IsTagged(
    int offset_in_bytes, int end_offset,
    int* out_end_of_contiguous_region_offset) {
  DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
  DCHECK(IsAligned(end_offset, kTaggedSize));
  DCHECK(offset_in_bytes < end_offset);
  if (all_fields_tagged_) {
    *out_end_of_contiguous_region_offset = end_offset;
    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
    return true;
  }
  int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
  // Header offsets are clamped to field 0.
  int field_index = std::max(0, (offset_in_bytes - header_size_) / kTaggedSize);
  int sequence_length;
  bool tagged = layout_descriptor_.IsTagged(field_index, max_sequence_length,
                                            &sequence_length);
  DCHECK_GT(sequence_length, 0);
  if (offset_in_bytes < header_size_) {
    // Object headers do not contain non-tagged fields. Check if the contiguous
    // region continues after the header.
    if (tagged) {
      // First field is tagged, calculate end offset from there.
      *out_end_of_contiguous_region_offset =
          header_size_ + sequence_length * kTaggedSize;

    } else {
      // First field is raw, so the tagged region stops at the header's end.
      *out_end_of_contiguous_region_offset = header_size_;
    }
    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
    return true;
  }
  *out_end_of_contiguous_region_offset =
      offset_in_bytes + sequence_length * kTaggedSize;
  DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
  return tagged;
}
227 
// Shrinks this slow-mode layout descriptor in place so that it covers exactly
// the first |num_descriptors| descriptors of |map|: right-trims the backing
// store if it became too long, then zeroes and reinitializes all layout bits.
LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
                                        DescriptorArray descriptors,
                                        int num_descriptors) {
  DisallowHeapAllocation no_allocation;
  // Fast mode descriptors are never shared and therefore always fully
  // correspond to their map.
  if (!IsSlowLayout()) return *this;

  int layout_descriptor_length =
      CalculateCapacity(map, descriptors, num_descriptors);
  // It must not become a fast-mode descriptor here, because otherwise it
  // would have to be the fast pointer layout descriptor already, but it is in
  // slow mode now.
  DCHECK_LT(kBitsInSmiLayout, layout_descriptor_length);

  // Trim, clean and reinitialize this slow-mode layout descriptor.
  int new_backing_store_length =
      GetSlowModeBackingStoreLength(layout_descriptor_length);
  int backing_store_length = length();
  if (new_backing_store_length != backing_store_length) {
    DCHECK_LT(new_backing_store_length, backing_store_length);
    int delta = backing_store_length - new_backing_store_length;
    heap->RightTrimFixedArray(*this, delta);
  }
  // Clear all bits, then recompute them from the descriptors.
  memset(GetDataStartAddress(), 0, DataSize());
  LayoutDescriptor layout_descriptor =
      Initialize(*this, map, descriptors, num_descriptors);
  DCHECK_EQ(*this, layout_descriptor);
  return layout_descriptor;
}
257 
// Debug verifier: checks that every own field of |map| has the expected
// tagged/raw bit in this descriptor (only in-object double fields are
// expected to be raw).  When |check_tail| is set, additionally asserts that
// all bits past the last field are tagged.  Trivially true when double-field
// unboxing is disabled.
bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
  if (FLAG_unbox_double_fields) {
    DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
    int last_field_index = 0;
    for (InternalIndex i : map.IterateOwnDescriptors()) {
      PropertyDetails details = descriptors.GetDetails(i);
      if (details.location() != kField) continue;
      FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
      // Only in-object doubles are unboxed; everything else must be tagged.
      bool tagged_expected =
          !field_index.is_inobject() || !details.representation().IsDouble();
      // Multi-word fields must have consistent bits for every word.
      for (int bit = 0; bit < details.field_width_in_words(); bit++) {
        bool tagged_actual = IsTagged(details.field_index() + bit);
        DCHECK_EQ(tagged_expected, tagged_actual);
        if (tagged_actual != tagged_expected) return false;
      }
      last_field_index =
          std::max(last_field_index,
                   details.field_index() + details.field_width_in_words());
    }
    if (check_tail) {
      int n = capacity();
      for (int i = last_field_index; i < n; i++) {
        DCHECK(IsTagged(i));
      }
    }
  }
  return true;
}
286 }  // namespace internal
287 }  // namespace v8
288