/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013, 2014 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "DFGWorklist.h"
#include "DelayedReleaseScope.h"
#include "EdenGCActivityCallback.h"
#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
#include "HeapIterationScope.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSCInlines.h"
#include "JSONObject.h"
#include "JSVirtualMachineInternal.h"
#include "RecursiveAllocationScope.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "VM.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/CurrentTime.h>
#include <wtf/ProcessID.h>
#include <wtf/RAMSize.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.

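// Set ENABLE_GC_LOGGING to 1 to compile in the GCPHASE/GCCOUNTER instrumentation
// below, which reports per-phase GC timings and counters at process exit.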
#define ENABLE_GC_LOGGING 0

#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

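// GCTimer accumulates the total, minimum, and maximum duration of a GC phase,
// tracked separately for Eden collections, full collections, and both combined.
// The summary is logged when the timer is destroyed, i.e. at process exit for
// the function-local statics created by GCPHASE.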
struct GCTimer {
    GCTimer(const char* name)
        : m_name(name)
    {
    }
    ~GCTimer()
    {
        logData(m_allCollectionData, "(All)");
        logData(m_edenCollectionData, "(Eden)");
        logData(m_fullCollectionData, "(Full)");
    }

    struct TimeRecord {
        TimeRecord()
            : m_time(0)
            , m_min(std::numeric_limits<double>::infinity())
            , m_max(0)
            , m_count(0)
        {
        }

        double m_time;
        double m_min;
        double m_max;
        size_t m_count;
    };

    void logData(const TimeRecord& data, const char* extra)
    {
        if (!data.m_count)
            return; // Avoid a meaningless average for collection types that never ran.
        dataLogF("[%d] %s %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %zu)\n",
            getCurrentProcessID(),
            m_name, extra,
            data.m_time * 1000,
            data.m_time * 1000 / data.m_count,
            data.m_min * 1000,
            data.m_max * 1000,
            data.m_count);
    }

    void updateData(TimeRecord& data, double duration)
    {
        if (duration < data.m_min)
            data.m_min = duration;
        if (duration > data.m_max)
            data.m_max = duration;
        data.m_count++;
        data.m_time += duration;
    }

    void didFinishPhase(HeapOperation collectionType, double duration)
    {
        TimeRecord& data = collectionType == EdenCollection ? m_edenCollectionData : m_fullCollectionData;
        updateData(data, duration);
        updateData(m_allCollectionData, duration);
    }

    TimeRecord m_allCollectionData;
    TimeRecord m_fullCollectionData;
    TimeRecord m_edenCollectionData;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer, HeapOperation collectionType)
        : m_timer(timer)
        , m_start(WTF::monotonicallyIncreasingTime())
        , m_collectionType(collectionType)
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::monotonicallyIncreasingTime() - m_start;
        m_timer->didFinishPhase(m_collectionType, delta);
    }
    GCTimer* m_timer;
    double m_start;
    HeapOperation m_collectionType;
};

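// GCCounter accumulates a per-phase statistic (e.g. the number of visited values)
// and logs the total, average, minimum, and maximum at process exit.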
struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(std::numeric_limits<size_t>::max())
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), m_name, m_total, m_total / m_count, m_min, m_max);
    }
    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

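// GCPHASE defines a function-local static GCTimer named after the phase and times
// the enclosing scope with a GCTimerScope. GCCOUNTER adds one sample to a similarly
// defined static GCCounter. Both compile to no-ops unless ENABLE(GC_LOGGING) is set.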
#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer, m_operationInProgress)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif

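// A large heap's initial collection threshold is roughly 32 MB (capped at a quarter
// of RAM); a small heap's is 1 MB.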
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->currentThreadIsHoldingAPILock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

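// The functors below are used with MarkedSpace's cell iteration to implement the
// introspection and debugging entry points further down in this file
// (markDeadObjects, the object-counting accessors, and the type histograms).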
struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : public MarkedBlock::CountFunctor {
    void operator()(JSCell* cell)
    {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

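// RecordType builds a histogram of live cell class names; it backs
// objectTypeCounts() and protectedObjectTypeCounts().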
class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_sizeAfterLastFullCollect(0)
    , m_sizeBeforeLastFullCollect(0)
    , m_sizeAfterLastEdenCollect(0)
    , m_sizeBeforeLastEdenCollect(0)
    , m_bytesAllocatedThisCycle(0)
    , m_bytesAbandonedSinceLastFullCollect(0)
    , m_maxEdenSize(m_minBytesPerCycle)
    , m_maxHeapSize(m_minBytesPerCycle)
    , m_shouldDoFullCollection(false)
    , m_totalBytesVisited(0)
    , m_totalBytesCopied(0)
    , m_operationInProgress(NoOperation)
    , m_blockAllocator()
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_extraMemoryUsage(0)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_codeBlocks(m_blockAllocator)
    , m_isSafeToCollect(false)
    , m_writeBarrierBuffer(256)
    , m_vm(vm)
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
    , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
#if ENABLE(GGC)
    , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
#else
    , m_edenActivityCallback(m_fullActivityCallback)
#endif
    , m_sweeper(IncrementalSweeper::create(this))
    , m_deferralDepth(0)
{
    m_storageSpace.init();
}

Heap::~Heap()
{
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->entryScope);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    collectIfNecessaryOrDefer();
}

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    size_t abandonedBytes = static_cast<size_t>(0.1 * m_sizeAfterLastCollect);

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}

void Heap::didAbandon(size_t bytes)
{
    if (m_fullActivityCallback) {
        m_fullActivityCallback->didAllocate(
            m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    }
    m_bytesAbandonedSinceLastFullCollect += bytes;
}

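// protect/unprotect maintain a reference-counted set of GC roots on behalf of API
// clients. An illustrative usage from client code holding the API lock:
//
//     vm->heap.protect(value);   // value now survives collections...
//     vm->heap.unprotect(value); // ...until the matching unprotect.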
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->currentThreadIsHoldingAPILock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
{
    if (m_arrayBuffers.addReference(cell, buffer)) {
        collectIfNecessaryOrDefer();
        didAllocate(buffer->gcSizeEstimateInBytes());
    }
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    GCPHASE(FinalizeUnconditionalFinalizers);
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::willStartIterating()
{
    m_objectSpace.willStartIterating();
}

void Heap::didFinishIterating()
{
    m_objectSpace.didFinishIterating();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        registerRoots[i]->setMarked();
        roots.add(registerRoots[i]);
    }
}

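// markRoots drives the marking phase: it gathers conservative roots from the machine
// and JS stacks, clears old liveness data, visits every strong root category in
// parallel mode, drains the mark stack to a fixed point, and finally marks weak
// handles, whose liveness depends on the rest of the object graph.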
void Heap::markRoots(double gcStartTime)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    void* dummy;
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, &dummy);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    sanitizeStackForVM(m_vm);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitTempSortVectors(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}

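// The copy phase moves surviving backing stores (e.g. property storage) into fresh
// CopiedSpace blocks so that fragmented source blocks can be reclaimed;
// shouldDoCopyPhase() lets us skip the work when CopiedSpace decides it isn't
// worthwhile.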
void Heap::copyBackingStores()
{
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}

void Heap::gatherStackRoots(ConservativeRoots& roots, void** dummy)
{
    GCPHASE(GatherStackRoots);
    m_jitStubRoutines.clearMarks();
    m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, dummy);
}

void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
#if !ENABLE(JIT)
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
#else
    UNUSED_PARAM(roots);
#endif
}

void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
#if ENABLE(DFG_JIT)
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
#else
    UNUSED_PARAM(roots);
#endif
}

void Heap::clearLivenessData()
{
    GCPHASE(ClearLivenessData);
    m_objectSpace.clearNewlyAllocated();
    m_objectSpace.clearMarks();
}

void Heap::visitExternalRememberedSet()
{
#if JSC_OBJC_API_ENABLED
    scanExternalRememberedSet(*m_vm, m_slotVisitor);
#endif
}

void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitCompilerWorklistWeakReferences()
{
#if ENABLE(DFG_JIT)
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->visitWeakReferences(m_slotVisitor, m_codeBlocks);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("DFG Worklists:\n", m_slotVisitor);
#endif
}

void Heap::removeDeadCompilerWorklistEntries()
{
#if ENABLE(DFG_JIT)
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
#endif
}

void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitTempSortVectors);

    for (auto* vector : m_tempSortingVectors) {
        for (auto& valueStringPair : *vector) {
            if (valueStringPair.first)
                heapRootVisitor.visit(&valueStringPair.first);
        }
    }

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Temp Sort Vectors:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception())
        return;

    visitor.visit(m_vm->addressOfException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}

void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}

void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        m_codeBlocks.traceMarked(m_slotVisitor); // New "executing" code blocks may be discovered.
        if (m_slotVisitor.isEmpty())
            break;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}

void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet) {
        MarkedBlock::blockFor(cell)->clearRemembered(cell);
        const_cast<JSCell*>(cell)->setRemembered(false);
    }
#else
    UNUSED_PARAM(rememberedSet);
#endif
}

void Heap::updateObjectCounts(double gcStartTime)
{
    GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount());

    if (Options::logGC() == GCLogging::Verbose) {
        size_t visitCount = m_slotVisitor.visitCount();
#if ENABLE(PARALLEL_GC)
        visitCount += m_sharedData.childVisitCount();
#endif
        dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
    }

    if (m_operationInProgress == EdenCollection) {
        m_totalBytesVisited += m_slotVisitor.bytesVisited();
        m_totalBytesCopied += m_slotVisitor.bytesCopied();
    } else {
        ASSERT(m_operationInProgress == FullCollection);
        m_totalBytesVisited = m_slotVisitor.bytesVisited();
        m_totalBytesCopied = m_slotVisitor.bytesCopied();
    }
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif
}

void Heap::resetVisitors()
{
    m_slotVisitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::extraSize()
{
    return m_extraMemoryUsage + m_arrayBuffers.size();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size() + extraSize();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
}

size_t Heap::sizeAfterCollect()
{
    // The result here may not agree with the normal Heap::size().
    // This is due to the fact that we only count live copied bytes
    // rather than all used (including dead) copied bytes, thus it's
    // always the case that m_totalBytesCopied <= m_storageSpace.size().
    ASSERT(m_totalBytesCopied <= m_storageSpace.size());
    return m_totalBytesVisited + m_totalBytesCopied + extraSize();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    HeapIterationScope iterationScope(*this);
    return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->entryScope)
        return;

    // If we have things on any worklist, then don't delete code. This is kind of
    // a weird heuristic. It's definitely not safe to throw away code that is on
    // the worklist. But this change was made in a hurry so we just avoid throwing
    // away any code if there is any code on any worklist. I suspect that this
    // might not actually be too dumb: if there is code on worklists then that
    // means that we are running some hot JS code right now. Maybe causing
    // recompilations isn't a good idea.
#if ENABLE(DFG_JIT)
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            if (worklist->isActiveForVM(*vm()))
                return;
        }
    }
#endif // ENABLE(DFG_JIT)

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    ASSERT(m_operationInProgress == FullCollection || m_operationInProgress == NoOperation);
    m_codeBlocks.clearMarksForFullCollection();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(FullCollection);
}

void Heap::deleteAllUnlinkedFunctionCode()
{
    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearUnlinkedCodeForRecompilationIfNotCompiling();
    }
}

void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }
}

void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

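// Called by the write barrier when a reference to a (possibly) new object is stored
// into an object that survived a previous collection. Remembered cells are treated
// as roots by the next Eden collection.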
void Heap::addToRememberedSet(const JSCell* cell)
{
    ASSERT(cell);
    ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
    if (isRemembered(cell))
        return;
    MarkedBlock::blockFor(cell)->setRemembered(cell);
    const_cast<JSCell*>(cell)->setRemembered(true);
    m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(FullCollection);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");
    DelayedReleaseScope delayedReleaseScope(m_objectSpace);
    m_objectSpace.sweep();
    m_objectSpace.shrink();
}

static const double minute = 60.0;

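// The phases below run in a fixed order: suspend the DFG, delete stale code, mark,
// reap weak sets, copy backing stores, finalize, kick off the incremental sweeper,
// then recompute allocation limits and resume the compiler threads.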
void Heap::collect(HeapOperation collectionType)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);
    GCPHASE(Collect);

    double gcStartTime = WTF::monotonicallyIncreasingTime();

    deleteOldCode(gcStartTime);
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime);

    JAVASCRIPTCORE_GC_MARKED();

    reapWeakHandles();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}

void Heap::suspendCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
#endif
}

void Heap::willStartCollection(HeapOperation collectionType)
{
    GCPHASE(StartingCollection);
    if (shouldDoFullCollection(collectionType)) {
        m_operationInProgress = FullCollection;
        m_slotVisitor.clearMarkStack();
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
    } else {
        m_operationInProgress = EdenCollection;
        if (Options::logGC())
            dataLog("EdenCollection, ");
    }
    if (m_operationInProgress == FullCollection) {
        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
        m_extraMemoryUsage = 0;

        if (m_fullActivityCallback)
            m_fullActivityCallback->willCollect();
    } else {
        ASSERT(m_operationInProgress == EdenCollection);
        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
    }

    if (m_edenActivityCallback)
        m_edenActivityCallback->willCollect();
}

void Heap::deleteOldCode(double gcStartTime)
{
    if (m_operationInProgress == EdenCollection)
        return;

    GCPHASE(DeleteOldCode);
    if (gcStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }
}

void Heap::flushOldStructureIDTables()
{
    GCPHASE(FlushOldStructureIDTables);
    m_structureIDTable.flushOldTables();
}

void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}

void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}

void Heap::reapWeakHandles()
{
    GCPHASE(ReapingWeakHandles);
    m_objectSpace.reapWeakSets();
}

void Heap::sweepArrayBuffers()
{
    GCPHASE(SweepingArrayBuffers);
    m_arrayBuffers.sweep();
}

struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
    MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
        : m_index(0)
        , m_blocks(blocks)
    {
    }

    void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }

    size_t m_index;
    Vector<MarkedBlock*>& m_blocks;
};

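// A full collection snapshots the current block set so the incremental sweeper can
// walk exactly the blocks that existed at collection time, even if new blocks are
// allocated while sweeping is still in progress.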
void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);
    if (m_operationInProgress != FullCollection)
        return;

    m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
    MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
    m_objectSpace.forEachBlock(functor);
}

void Heap::deleteSourceProviderCaches()
{
    GCPHASE(DeleteSourceProviderCaches);
    m_vm->clearSourceProviderCaches();
}

void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);
    if (m_operationInProgress != FullCollection)
        return;
    m_sweeper->startSweeping(m_blockSnapshot);
}

void Heap::rememberCurrentlyExecutingCodeBlocks()
{
    GCPHASE(RememberCurrentlyExecutingCodeBlocks);
    m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
}

void Heap::resetAllocators()
{
    GCPHASE(ResetAllocators);
    m_objectSpace.resetAllocators();
}

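// After a full collection the next heap limit is recomputed from the live size (see
// proportionalHeapSize). After an Eden collection the limit grows by the amount that
// survived, and a full collection is requested once the Eden region would shrink
// below a third of the heap.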
void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
        double edenToOldGenerationRatio = static_cast<double>(m_maxEdenSize) / static_cast<double>(m_maxHeapSize);
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}

void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);
}

void Heap::resumeCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
#endif
}

void Heap::markDeadObjects()
{
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
}

void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
{
    m_fullActivityCallback = activityCallback;
}

void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
{
    m_edenActivityCallback = activityCallback;
}
GCActivityCallback* Heap::fullActivityCallback()
{
    return m_fullActivityCallback.get();
}

GCActivityCallback* Heap::edenActivityCallback()
{
    return m_edenActivityCallback.get();
}

void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
{
    m_sweeper = sweeper;
}

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    if (m_fullActivityCallback)
        m_fullActivityCallback->setEnabled(enable);
    if (m_edenActivityCallback)
        m_edenActivityCallback->setEnabled(enable);
}

void Heap::didAllocate(size_t bytes)
{
    if (m_edenActivityCallback)
        m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
    m_bytesAllocatedThisCycle += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}

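// Zombify overwrites every word of a dead cell (past the zap marker, if any) with
// zombifiedBits, so that use-after-free bugs crash on a recognizable pattern.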
class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = zombifiedBits;
    }
};

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        DelayedReleaseScope delayedReleaseScope(m_objectSpace);
        m_objectSpace.zombifySweep();
    }
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
}

void Heap::flushWriteBarrierBuffer(JSCell* cell)
{
#if ENABLE(GGC)
    m_writeBarrierBuffer.flush(*this);
    m_writeBarrierBuffer.add(cell);
#else
    UNUSED_PARAM(cell);
#endif
}

bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
{
#if ENABLE(GGC)
    if (Options::alwaysDoFullCollection())
        return true;

    switch (requestedCollectionType) {
    case EdenCollection:
        return false;
    case FullCollection:
        return true;
    case AnyCollection:
        return m_shouldDoFullCollection;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
#else
    UNUSED_PARAM(requestedCollectionType);
    return true;
#endif
}

} // namespace JSC
