// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_

#include "log.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

int Heap::MaxObjectSizeInPagedSpace() {
  return Page::kMaxHeapObjectSize;
}


Object* Heap::AllocateSymbol(Vector<const char> str,
                             int chars,
                             uint32_t length_field) {
  unibrow::Utf8InputBuffer<> buffer(str.start(),
                                    static_cast<unsigned>(str.length()));
  return AllocateInternalSymbol(&buffer, chars, length_field);
}


Object* Heap::AllocateRaw(int size_in_bytes,
                          AllocationSpace space,
                          AllocationSpace retry_space) {
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  ASSERT(space != NEW_SPACE ||
         retry_space == OLD_POINTER_SPACE ||
         retry_space == OLD_DATA_SPACE);
#ifdef DEBUG
  if (FLAG_gc_interval >= 0 &&
      !disallow_allocation_failure_ &&
      Heap::allocation_timeout_-- <= 0) {
    return Failure::RetryAfterGC(size_in_bytes, space);
  }
  Counters::objs_since_last_full.Increment();
  Counters::objs_since_last_young.Increment();
#endif
  Object* result;
  if (NEW_SPACE == space) {
    result = new_space_.AllocateRaw(size_in_bytes);
    if (always_allocate() && result->IsFailure()) {
      space = retry_space;
    } else {
      return result;
    }
  }

  if (OLD_POINTER_SPACE == space) {
    result = old_pointer_space_->AllocateRaw(size_in_bytes);
  } else if (OLD_DATA_SPACE == space) {
    result = old_data_space_->AllocateRaw(size_in_bytes);
  } else if (CODE_SPACE == space) {
    result = code_space_->AllocateRaw(size_in_bytes);
  } else if (LO_SPACE == space) {
    result = lo_space_->AllocateRaw(size_in_bytes);
  } else if (CELL_SPACE == space) {
    result = cell_space_->AllocateRaw(size_in_bytes);
  } else {
    ASSERT(MAP_SPACE == space);
    result = map_space_->AllocateRaw(size_in_bytes);
  }
  if (result->IsFailure()) old_gen_exhausted_ = true;
  return result;
}
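
// A typical call site checks the result for failure and either
// propagates it or lets the CALL_AND_RETRY machinery below drive the
// GC-and-retry loop. Illustrative sketch only (FixedArray::SizeFor is
// assumed from objects.h; this is not a call site from this file):
//   Object* result = Heap::AllocateRaw(FixedArray::SizeFor(length),
//                                      NEW_SPACE, OLD_POINTER_SPACE);
//   if (result->IsFailure()) return result;  // caller handles retry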


Object* Heap::NumberFromInt32(int32_t value) {
  if (Smi::IsValid(value)) return Smi::FromInt(value);
  // Bypass NumberFromDouble to avoid various redundant checks.
  return AllocateHeapNumber(FastI2D(value));
}


Object* Heap::NumberFromUint32(uint32_t value) {
  if (static_cast<int32_t>(value) >= 0 &&
      Smi::IsValid(static_cast<int32_t>(value))) {
    return Smi::FromInt(static_cast<int32_t>(value));
  }
  // Bypass NumberFromDouble to avoid various redundant checks.
  return AllocateHeapNumber(FastUI2D(value));
}

Object* Heap::AllocateRawMap() {
#ifdef DEBUG
  Counters::objs_since_last_full.Increment();
  Counters::objs_since_last_young.Increment();
#endif
  Object* result = map_space_->AllocateRaw(Map::kSize);
  if (result->IsFailure()) old_gen_exhausted_ = true;
  return result;
}


Object* Heap::AllocateRawCell() {
#ifdef DEBUG
  Counters::objs_since_last_full.Increment();
  Counters::objs_since_last_young.Increment();
#endif
  Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
  if (result->IsFailure()) old_gen_exhausted_ = true;
  return result;
}


bool Heap::InNewSpace(Object* object) {
  return new_space_.Contains(object);
}


bool Heap::InFromSpace(Object* object) {
  return new_space_.FromSpaceContains(object);
}


bool Heap::InToSpace(Object* object) {
  return new_space_.ToSpaceContains(object);
}


bool Heap::ShouldBePromoted(Address old_address, int object_size) {
  // An object should be promoted if:
  // - the object has survived a scavenge operation or
  // - to space is already 25% full.
  return old_address < new_space_.age_mark()
      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
}
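
// Illustrative arithmetic (numbers assumed, not from this file): with a
// 512 KB to-space capacity, Capacity() >> 2 is 128 KB, so once to space
// holds 128 KB every further survivor of the scavenge is promoted.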


void Heap::RecordWrite(Address address, int offset) {
  if (new_space_.Contains(address)) return;
  ASSERT(!new_space_.FromSpaceContains(address));
  SLOW_ASSERT(Contains(address + offset));
  Page::SetRSet(address, offset);
}


OldSpace* Heap::TargetSpace(HeapObject* object) {
  InstanceType type = object->map()->instance_type();
  AllocationSpace space = TargetSpaceId(type);
  return (space == OLD_POINTER_SPACE)
      ? old_pointer_space_
      : old_data_space_;
}

AllocationSpace Heap::TargetSpaceId(InstanceType type) {
  // Heap numbers and sequential strings are promoted to old data space;
  // all other object types are promoted to old pointer space.  We do not
  // use object->IsHeapNumber() and object->IsSeqString() because we
  // already know that object has the heap object tag.
  ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
  bool has_pointers =
      type != HEAP_NUMBER_TYPE &&
      (type >= FIRST_NONSTRING_TYPE ||
       (type & kStringRepresentationMask) != kSeqStringTag);
  return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
}
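
// For example, HEAP_NUMBER_TYPE and the sequential string types land in
// OLD_DATA_SPACE, while JS_OBJECT_TYPE (which is >= FIRST_NONSTRING_TYPE
// and can contain pointers) lands in OLD_POINTER_SPACE.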


void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
  ASSERT(IsAligned(byte_size, kPointerSize));

  // Use block copying memcpy if the segment we're copying is large
  // enough to justify the extra call/setup overhead.
  static const int kBlockCopyLimit = 16 * kPointerSize;

  if (byte_size >= kBlockCopyLimit) {
    memcpy(dst, src, byte_size);
  } else {
    int remaining = byte_size / kPointerSize;
    do {
      remaining--;
      *dst++ = *src++;
    } while (remaining > 0);
  }
}
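
// For instance, with 32-bit pointers (kPointerSize == 4) the limit is
// 64 bytes: blocks of 16 words or more go through memcpy, smaller ones
// take the word-at-a-time loop.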


void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
  ASSERT(InFromSpace(object));

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer.  A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    *p = first_word.ToForwardingAddress();
    return;
  }

  // Call the slow part of scavenge object.
  return ScavengeObjectSlow(p, object);
}


int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
  ASSERT(HasBeenSetup());
  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
  if (change_in_bytes >= 0) {
    // Avoid overflow.
    if (amount > amount_of_external_allocated_memory_) {
      amount_of_external_allocated_memory_ = amount;
    }
    int amount_since_last_global_gc =
        amount_of_external_allocated_memory_ -
        amount_of_external_allocated_memory_at_last_global_gc_;
    if (amount_since_last_global_gc > external_allocation_limit_) {
      CollectAllGarbage(false);
    }
  } else {
    // Avoid underflow.
    if (amount >= 0) {
      amount_of_external_allocated_memory_ = amount;
    }
  }
  ASSERT(amount_of_external_allocated_memory_ >= 0);
  return amount_of_external_allocated_memory_;
}
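
// Embedders typically reach this through the public API; a rough sketch
// (assuming the v8::V8 entry point of this era, not defined here):
//   v8::V8::AdjustAmountOfExternalAllocatedMemory(buffer_size);   // grow
//   v8::V8::AdjustAmountOfExternalAllocatedMemory(-buffer_size);  // shrink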


void Heap::SetLastScriptId(Object* last_script_id) {
  roots_[kLastScriptIdRootIndex] = last_script_id;
}


#define GC_GREEDY_CHECK() \
  ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())


// Calls the FUNCTION_CALL function up to three times: the initial call
// plus up to two retries, each after triggering a garbage collection,
// to guarantee that any allocations performed during the call will
// succeed if there's enough memory.

// Warning: Do not use the identifiers __object__ or __scope__ in a
// call to this macro.

#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)         \
  do {                                                                    \
    GC_GREEDY_CHECK();                                                    \
    Object* __object__ = FUNCTION_CALL;                                   \
    if (!__object__->IsFailure()) RETURN_VALUE;                           \
    if (__object__->IsOutOfMemoryFailure()) {                             \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0");      \
    }                                                                     \
    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
    Heap::CollectGarbage(Failure::cast(__object__)->requested(),          \
                         Failure::cast(__object__)->allocation_space());  \
    __object__ = FUNCTION_CALL;                                           \
    if (!__object__->IsFailure()) RETURN_VALUE;                           \
    if (__object__->IsOutOfMemoryFailure()) {                             \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1");      \
    }                                                                     \
    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
    Counters::gc_last_resort_from_handles.Increment();                    \
    Heap::CollectAllGarbage(false);                                       \
    {                                                                     \
      AlwaysAllocateScope __scope__;                                      \
      __object__ = FUNCTION_CALL;                                         \
    }                                                                     \
    if (!__object__->IsFailure()) RETURN_VALUE;                           \
    if (__object__->IsOutOfMemoryFailure() ||                             \
        __object__->IsRetryAfterGC()) {                                   \
      /* TODO(1181417): Fix this. */                                      \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2");      \
    }                                                                     \
    RETURN_EMPTY;                                                         \
  } while (false)


#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)                \
  CALL_AND_RETRY(FUNCTION_CALL,                                \
                 return Handle<TYPE>(TYPE::cast(__object__)),  \
                 return Handle<TYPE>())


#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
  CALL_AND_RETRY(FUNCTION_CALL, return, return)
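
// Typical use in handle-returning factory code, sketched for
// illustration only (the real call sites live elsewhere, e.g. in
// handles.cc; Heap::AllocateFixedArray is assumed from heap.h):
//   Handle<FixedArray> NewFixedArray(int size) {
//     CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(size), FixedArray);
//   }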


#ifdef DEBUG

inline bool Heap::allow_allocation(bool new_state) {
  bool old = allocation_allowed_;
  allocation_allowed_ = new_state;
  return old;
}

#endif


} }  // namespace v8::internal

#endif  // V8_HEAP_INL_H_