• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20 
21 #include "config.h"
22 #include "Heap.h"
23 
24 #include "CodeBlock.h"
25 #include "ConservativeRoots.h"
26 #include "GCActivityCallback.h"
27 #include "Interpreter.h"
28 #include "JSGlobalData.h"
29 #include "JSGlobalObject.h"
30 #include "JSLock.h"
31 #include "JSONObject.h"
32 #include "Tracing.h"
33 #include <algorithm>
34 
35 #define COLLECT_ON_EVERY_SLOW_ALLOCATION 0
36 
37 using namespace std;
38 
39 namespace JSC {
40 
41 const size_t minBytesPerCycle = 512 * 1024;
42 
// Constructs the heap for a single JSGlobalData. Member initialization order
// follows the member declaration order in Heap.h, as required by C++.
Heap::Heap(JSGlobalData* globalData)
    : m_operationInProgress(NoOperation)
    , m_markedSpace(globalData)
    , m_markListSet(0)
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_globalData(globalData)
    , m_machineThreads(this)
    , m_markStack(globalData->jsArrayVPtr)
    , m_handleHeap(globalData)
    , m_extraCost(0)
{
    // Start with a modest allocation threshold; reset() re-derives it from the
    // live heap size after every collection.
    m_markedSpace.setHighWaterMark(minBytesPerCycle);
    // Notify the GC activity callback that the heap exists / is active.
    (*m_activityCallback)();
}
57 
Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    // destroy() nulls out m_globalData as its final step.
    ASSERT(!m_globalData);
}
63 
// Tears down the heap. Must be called before the destructor runs; safe to
// call more than once (subsequent calls are no-ops because m_globalData is 0).
void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

#if ENABLE(JIT)
    m_globalData->jitStubs->clearHostFunctionStubs();
#endif

    delete m_markListSet;
    m_markListSet = 0;
    // Clear marks, finalize weak handles, then destroy the marked space, in
    // that order, so finalizers observe a fully-unmarked heap.
    m_markedSpace.clearMarks();
    m_handleHeap.finalizeWeakHandles();
    m_markedSpace.destroy();

    // Signals to ~Heap() (and to repeated destroy() calls) that teardown ran.
    m_globalData = 0;
}
90 
// Records non-GC memory retained by GC objects so that collections can be
// triggered before such memory piles up unboundedly.
void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    // NOTE(review): the threshold test intentionally(?) runs before the new cost
    // is accumulated, so a single very large report only triggers collection on
    // the *next* report — confirm this is the intended heuristic.
    if (m_extraCost > maxExtraCost && m_extraCost > m_markedSpace.highWaterMark() / 2)
        collectAllGarbage();
    m_extraCost += cost;
}
108 
// Slow-path allocation: runs a collection (via reset(DoNotSweep)) and then
// retries the allocation in the marked space. Called when the fast path
// cannot satisfy the request.
void* Heap::allocateSlowCase(size_t bytes)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(bytes <= MarkedSpace::maxCellSize);
    ASSERT(m_operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_SLOW_ALLOCATION
    // Debugging aid: force a full collection on every slow allocation.
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    // Collect (without sweeping) to free up space before retrying.
    reset(DoNotSweep);

    // Flag the operation so reentrant collection attempts are detectable.
    m_operationInProgress = Allocation;
    void* result = m_markedSpace.allocate(bytes);
    m_operationInProgress = NoOperation;

    // The post-collection allocation is expected to succeed.
    ASSERT(result);
    return result;
}
131 
protect(JSValue k)132 void Heap::protect(JSValue k)
133 {
134     ASSERT(k);
135     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
136 
137     if (!k.isCell())
138         return;
139 
140     m_protectedValues.add(k.asCell());
141 }
142 
unprotect(JSValue k)143 bool Heap::unprotect(JSValue k)
144 {
145     ASSERT(k);
146     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
147 
148     if (!k.isCell())
149         return false;
150 
151     return m_protectedValues.remove(k.asCell());
152 }
153 
markProtectedObjects(HeapRootMarker & heapRootMarker)154 void Heap::markProtectedObjects(HeapRootMarker& heapRootMarker)
155 {
156     ProtectCountSet::iterator end = m_protectedValues.end();
157     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
158         heapRootMarker.mark(&it->first);
159 }
160 
// Registers a temporary (value, string) sort vector so its JSValues stay
// alive if a GC occurs mid-sort. Paired with popTempSortVector.
void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}
165 
// Unregisters a temporary sort vector previously pushed. Vectors are pushed
// and popped in strict LIFO order; the assert verifies the caller is popping
// the vector it pushed. tempVector is unused in release builds, hence
// ASSERT_UNUSED.
void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}
171 
markTempSortVectors(HeapRootMarker & heapRootMarker)172 void Heap::markTempSortVectors(HeapRootMarker& heapRootMarker)
173 {
174     typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;
175 
176     VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
177     for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
178         Vector<ValueStringPair>* tempSortingVector = *it;
179 
180         Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
181         for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
182             if (vectorIt->first)
183                 heapRootMarker.mark(&vectorIt->first);
184         }
185     }
186 }
187 
// Convenience accessor for the interpreter's register file (the JS stack),
// which is scanned conservatively during root marking.
inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}
192 
// Marks all GC roots and transitively everything reachable from them.
// The phases below are order-sensitive:
//   1. Gather conservative roots (machine stacks/registers, register file)
//      while last cycle's mark bits are still valid.
//   2. Clear mark bits, then mark strong roots, draining the mark stack
//      after each root set.
//   3. Mark weak handles last, iterating until the opaque-root set stops
//      growing.
void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    // Stack-allocated local whose address approximates the current stack top
    // for the conservative machine-thread scan below.
    void* dummy;

    // Collection must not re-enter allocation or another collection.
    ASSERT(m_operationInProgress == NoOperation);
    if (m_operationInProgress != NoOperation)
        CRASH();

    m_operationInProgress = Collection;

    MarkStack& markStack = m_markStack;
    HeapRootMarker heapRootMarker(markStack);

    // We gather conservative roots before clearing mark bits because
    // conservative gathering uses the mark bits from our last mark pass to
    // determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(this);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);

    m_markedSpace.clearMarks();

    markStack.append(machineThreadRoots);
    markStack.drain();

    markStack.append(registerFileRoots);
    markStack.drain();

    markProtectedObjects(heapRootMarker);
    markStack.drain();

    markTempSortVectors(heapRootMarker);
    markStack.drain();

    // Argument lists currently on the stack (MarkedArgumentBuffers).
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootMarker, *m_markListSet);
    // A pending exception, if any, is a root too.
    if (m_globalData->exception)
        heapRootMarker.mark(&m_globalData->exception);
    markStack.drain();

    m_handleHeap.markStrongHandles(heapRootMarker);
    markStack.drain();

    m_handleStack.mark(heapRootMarker);
    markStack.drain();

    // Mark the small strings cache as late as possible, since it will clear
    // itself if nothing else has marked it.
    // FIXME: Change the small strings cache to use Weak<T>.
    m_globalData->smallStrings.markChildren(heapRootMarker);
    markStack.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = markStack.opaqueRootCount();
        m_handleHeap.markWeakHandles(heapRootMarker);
        markStack.drain();
    // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != markStack.opaqueRootCount());

    markStack.reset();

    m_operationInProgress = NoOperation;
}
268 
// Number of live GC cells, as tracked by the marked space.
size_t Heap::objectCount() const
{
    return m_markedSpace.objectCount();
}
273 
// Bytes currently in use by live cells in the marked space.
size_t Heap::size() const
{
    return m_markedSpace.size();
}
278 
// Total bytes reserved by the marked space, including free cells.
size_t Heap::capacity() const
{
    return m_markedSpace.capacity();
}
283 
// Number of JSGlobalObjects alive in this global data (counter maintained
// elsewhere, by the global objects themselves).
size_t Heap::globalObjectCount()
{
    return m_globalData->globalObjectCount;
}
288 
protectedGlobalObjectCount()289 size_t Heap::protectedGlobalObjectCount()
290 {
291     size_t count = m_handleHeap.protectedGlobalObjectCount();
292 
293     ProtectCountSet::iterator end = m_protectedValues.end();
294     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
295         if (it->first->isObject() && asObject(it->first)->isGlobalObject())
296             count++;
297     }
298 
299     return count;
300 }
301 
// Number of distinct cells with at least one explicit protect() reference.
size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}
306 
// Function object that tallies heap cells by type name into a TypeCountSet.
// Apply it to each cell of interest, then call take() to claim the counts.
class TypeCounter {
public:
    TypeCounter();
    // Records one occurrence of the given cell's type.
    void operator()(JSCell*);
    // Releases ownership of the accumulated counts to the caller.
    PassOwnPtr<TypeCountSet> take();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};
317 
// Starts with an empty count set, owned until take() is called.
inline TypeCounter::TypeCounter()
    : m_typeCountSet(new TypeCountSet)
{
}
322 
// Maps a cell to a human-readable type name for diagnostic counts.
// The checks are ordered: special cell kinds first, then the ClassInfo name,
// then a fallback for cells with no ClassInfo.
inline const char* TypeCounter::typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    // NOTE(review): a cell with a ClassInfo is reported by class name even if
    // it is not an object — the "[empty cell]" fallback below only applies to
    // non-object cells with no ClassInfo. Confirm this ordering is intended.
    if (const ClassInfo* info = cell->classInfo())
        return info->className;
    if (!cell->isObject())
        return "[empty cell]";
    return "Object";
}
339 
// Tallies one occurrence of the cell's type name.
inline void TypeCounter::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}
344 
// Transfers ownership of the accumulated counts to the caller; the counter
// holds nothing afterwards.
inline PassOwnPtr<TypeCountSet> TypeCounter::take()
{
    return m_typeCountSet.release();
}
349 
protectedObjectTypeCounts()350 PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
351 {
352     TypeCounter typeCounter;
353 
354     ProtectCountSet::iterator end = m_protectedValues.end();
355     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
356         typeCounter(it->first);
357     m_handleHeap.protectedObjectTypeCounts(typeCounter);
358 
359     return typeCounter.take();
360 }
361 
protectedObjectTypeCounts(TypeCounter & typeCounter)362 void HandleHeap::protectedObjectTypeCounts(TypeCounter& typeCounter)
363 {
364     Node* end = m_strongList.end();
365     for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
366         JSValue value = *node->slot();
367         if (value && value.isCell())
368             typeCounter(value.asCell());
369     }
370 }
371 
// Produces a per-type tally of every live cell in the heap by visiting all
// cells via forEach.
PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    TypeCounter typeCounter;
    forEach(typeCounter);
    return typeCounter.take();
}
378 
// True while an allocation or collection is in progress.
bool Heap::isBusy()
{
    return m_operationInProgress != NoOperation;
}
383 
// Runs a full collection cycle including an eager sweep and shrink.
void Heap::collectAllGarbage()
{
    reset(DoSweep);
}
388 
// Runs one garbage-collection cycle: mark, weak-handle finalization, marked
// space reset, optional sweep/shrink, and recomputation of the allocation
// high water mark. sweepToggle selects whether dead cells are swept eagerly.
void Heap::reset(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();
    // Weak handles are finalized after marking so owners see final liveness.
    m_handleHeap.finalizeWeakHandles();

    JAVASCRIPTCORE_GC_MARKED();

    m_markedSpace.reset();
    // Extra (non-GC) memory costs are only tracked until the next collection.
    m_extraCost = 0;

#if ENABLE(JSC_ZOMBIES)
    // Zombie debugging requires sweeping every cycle so dead cells are
    // converted promptly.
    sweepToggle = DoSweep;
#endif

    if (sweepToggle == DoSweep) {
        m_markedSpace.sweep();
        m_markedSpace.shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t proportionalBytes = 2 * m_markedSpace.size();
    m_markedSpace.setHighWaterMark(max(proportionalBytes, minBytesPerCycle));

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}
422 
// Replaces the GC activity callback; the heap takes ownership of the new
// callback and drops the old one.
void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}
427 
// Returns the current GC activity callback; the heap retains ownership.
GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}
432 
433 } // namespace JSC
434