1/*
2 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "GCThreadSharedData.h"
28
29#include "CopyVisitor.h"
30#include "CopyVisitorInlines.h"
31#include "GCThread.h"
32#include "MarkStack.h"
33#include "JSCInlines.h"
34#include "SlotVisitor.h"
35#include "SlotVisitorInlines.h"
36#include "VM.h"
37
38namespace JSC {
39
40#if ENABLE(PARALLEL_GC)
41void GCThreadSharedData::resetChildren()
42{
43    for (size_t i = 0; i < m_gcThreads.size(); ++i)
44        m_gcThreads[i]->slotVisitor()->reset();
45}
46
47size_t GCThreadSharedData::childVisitCount()
48{
49    unsigned long result = 0;
50    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
51        result += m_gcThreads[i]->slotVisitor()->visitCount();
52    return result;
53}
54
55size_t GCThreadSharedData::childBytesVisited()
56{
57    size_t result = 0;
58    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
59        result += m_gcThreads[i]->slotVisitor()->bytesVisited();
60    return result;
61}
62
63size_t GCThreadSharedData::childBytesCopied()
64{
65    size_t result = 0;
66    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
67        result += m_gcThreads[i]->slotVisitor()->bytesCopied();
68    return result;
69}
70#endif
71
// Builds the state shared between the main thread and the parallel GC helper
// threads, spawns the helpers, and blocks until they have all parked.
GCThreadSharedData::GCThreadSharedData(VM* vm)
    : m_vm(vm)
    , m_copiedSpace(&vm->heap.m_storageSpace)
    , m_shouldHashCons(false)
    , m_sharedMarkStack(vm->heap.blockAllocator())
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
    , m_copyIndex(0)
    , m_numberOfActiveGCThreads(0)
    , m_gcThreadsShouldWait(false)
    , m_currentPhase(NoPhase)
{
    m_copyLock.Init();
#if ENABLE(PARALLEL_GC)
    // Grab the lock so the new GC threads can be properly initialized before they start running.
    std::unique_lock<std::mutex> lock(m_phaseMutex);
    // Marker 0 is the main thread itself, so helpers start at index 1.
    for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
        m_numberOfActiveGCThreads++;
        SlotVisitor* slotVisitor = new SlotVisitor(*this);
        CopyVisitor* copyVisitor = new CopyVisitor(*this);
        GCThread* newThread = new GCThread(*this, slotVisitor, copyVisitor);
        ThreadIdentifier threadID = createThread(GCThread::gcThreadStartFunc, newThread, "JavaScriptCore::Marking");
        newThread->initializeThreadID(threadID);
        m_gcThreads.append(newThread);
    }

    // Wait for all the GCThreads to get to the right place.
    // NOTE(review): presumably each GCThread decrements m_numberOfActiveGCThreads
    // once it reaches its phase-wait loop (see GCThread) — confirm against GCThread.cpp.
    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
#endif
}
102
// Tells every helper thread to exit, then joins and destroys them.
GCThreadSharedData::~GCThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        // Hold both the marking and phase locks while flipping the exit flags,
        // so no helper can observe a half-updated state between them.
        std::lock_guard<std::mutex> markingLock(m_markingMutex);
        std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
        ASSERT(m_currentPhase == NoPhase);
        m_parallelMarkersShouldExit = true;
        // Clear m_gcThreadsShouldWait so helpers don't park again after waking.
        m_gcThreadsShouldWait = false;
        m_currentPhase = Exit;
        m_phaseConditionVariable.notify_all();
    }
    // Join each helper thread, then reclaim its GCThread object.
    for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
        waitForThreadCompletion(m_gcThreads[i]->threadID());
        delete m_gcThreads[i];
    }
#endif
}
122
123void GCThreadSharedData::reset()
124{
125    ASSERT(m_sharedMarkStack.isEmpty());
126
127    m_weakReferenceHarvesters.removeAll();
128
129    if (m_shouldHashCons) {
130        m_vm->resetNewStringsSinceLastHashCons();
131        m_shouldHashCons = false;
132    }
133}
134
// Moves the helper threads into the given GC phase. Must only be called when
// no phase is in progress (i.e. after the previous endCurrentPhase()).
void GCThreadSharedData::startNextPhase(GCPhase phase)
{
    std::lock_guard<std::mutex> lock(m_phaseMutex);
    ASSERT(!m_gcThreadsShouldWait);
    ASSERT(m_currentPhase == NoPhase);
    // Set the wait flag and the phase before notifying, all under the lock,
    // so a waking helper always sees a consistent phase.
    m_gcThreadsShouldWait = true;
    m_currentPhase = phase;
    m_phaseConditionVariable.notify_all();
}
144
// Ends the phase started by startNextPhase() and blocks until every helper
// thread has gone back to sleep.
void GCThreadSharedData::endCurrentPhase()
{
    // NOTE(review): this ASSERT reads m_gcThreadsShouldWait before the lock is
    // taken; only the GC-coordinating thread writes it here, but confirm no
    // other writer races this unsynchronized read.
    ASSERT(m_gcThreadsShouldWait);
    std::unique_lock<std::mutex> lock(m_phaseMutex);
    m_currentPhase = NoPhase;
    m_gcThreadsShouldWait = false;
    m_phaseConditionVariable.notify_all();
    // Wait for every helper to deactivate before returning, so the next
    // startNextPhase() can't overlap a still-running helper.
    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
}
154
155void GCThreadSharedData::didStartMarking()
156{
157    if (m_vm->heap.operationInProgress() == FullCollection) {
158#if ENABLE(PARALLEL_GC)
159        m_opaqueRoots.clear();
160#else
161        ASSERT(m_opaqueRoots.isEmpty());
162#endif
163}
164    std::lock_guard<std::mutex> lock(m_markingMutex);
165    m_parallelMarkersShouldExit = false;
166    startNextPhase(Mark);
167}
168
// Signals the end of marking: wakes any markers blocked on the marking
// condition, then retires the Mark phase.
void GCThreadSharedData::didFinishMarking()
{
    {
        // Flag and notify under the marking lock; the scope releases it before
        // endCurrentPhase() takes the phase lock.
        std::lock_guard<std::mutex> lock(m_markingMutex);
        m_parallelMarkersShouldExit = true;
        m_markingConditionVariable.notify_all();
    }

    ASSERT(m_currentPhase == Mark);
    endCurrentPhase();
}
180
// Builds the list of blocks to copy and starts the Copy phase on the
// helper threads.
void GCThreadSharedData::didStartCopying()
{
    {
        SpinLockHolder locker(&m_copyLock);
        if (m_vm->heap.operationInProgress() == EdenCollection) {
            // Reset the vector to be empty, but don't throw away the backing store.
            m_blocksToCopy.shrink(0);
            // An eden collection only copies the new generation's from-space.
            for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next())
                m_blocksToCopy.append(block);
        } else {
            ASSERT(m_vm->heap.operationInProgress() == FullCollection);
            // A full collection copies every block in the copied space.
            WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
        }
        // Helpers claim work from m_blocksToCopy starting at this index.
        m_copyIndex = 0;
    }

    // We do this here so that we avoid a race condition where the main thread can
    // blow through all of the copying work before the GCThreads fully wake up.
    // The GCThreads then request a block from the CopiedSpace when the copying phase
    // has completed, which isn't allowed.
    for (size_t i = 0; i < m_gcThreads.size(); i++)
        m_gcThreads[i]->copyVisitor()->startCopying();

    startNextPhase(Copy);
}
206
// Retires the Copy phase once all copying work has been handed out.
void GCThreadSharedData::didFinishCopying()
{
    ASSERT(m_currentPhase == Copy);
    endCurrentPhase();
}
212
213} // namespace JSC
214