1/*
2 * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12 * Library General Public License for more details.
13 *
14 * You should have received a copy of the GNU Library General Public License
15 * along with this library; see the file COPYING.LIB.  If not, write to
16 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
17 * Boston, MA 02110-1301, USA
18 *
19 */
20
21#include "config.h"
22#include "JSLock.h"
23
24#include "Heap.h"
25#include "CallFrame.h"
26#include "JSGlobalObject.h"
27#include "JSObject.h"
28#include "JSCInlines.h"
29#include <thread>
30
31namespace JSC {
32
// Process-wide mutex backing GlobalJSLock. Null until initialize() runs;
// allocated once and intentionally never freed so it remains valid for the
// lifetime of the process.
std::mutex* GlobalJSLock::s_sharedInstanceMutex;
34
// RAII acquisition of the process-wide shared-instance mutex.
// GlobalJSLock::initialize() must have been called first; otherwise this
// dereferences a null pointer.
GlobalJSLock::GlobalJSLock()
{
    s_sharedInstanceMutex->lock();
}
39
// Releases the mutex acquired by the constructor.
GlobalJSLock::~GlobalJSLock()
{
    s_sharedInstanceMutex->unlock();
}
44
// One-time setup of the shared mutex. Must run before any GlobalJSLock is
// constructed; callers are responsible for ensuring it is not invoked
// concurrently (there is no internal once-guard).
void GlobalJSLock::initialize()
{
    s_sharedInstanceMutex = new std::mutex();
}
49
// RAII holder for a VM's API lock: locks in init(), unlocks in the
// destructor. This overload grabs the VM through the ExecState.
JSLockHolder::JSLockHolder(ExecState* exec)
    : m_vm(&exec->vm())
{
    init();
}
55
// RAII holder taking the VM by pointer; |vm| must be non-null (init()
// dereferences it unconditionally).
JSLockHolder::JSLockHolder(VM* vm)
    : m_vm(vm)
{
    init();
}
61
// RAII holder taking the VM by reference.
JSLockHolder::JSLockHolder(VM& vm)
    : m_vm(&vm)
{
    init();
}
67
// Shared constructor tail: take the VM's API lock. Safe to call on a thread
// that already holds it — JSLock::lock() handles recursive acquisition.
void JSLockHolder::init()
{
    m_vm->apiLock().lock();
}
72
JSLockHolder::~JSLockHolder()
{
    // Order matters: take a local ref to the JSLock first, because clearing
    // m_vm may drop the last reference to the VM, and the JSLock must stay
    // alive until after unlock() returns.
    RefPtr<JSLock> apiLock(&m_vm->apiLock());
    m_vm.clear();
    apiLock->unlock();
}
79
// A freshly-constructed JSLock is unowned and unlocked. m_vm is a raw
// back-pointer that willDestroyVM() clears when the VM is torn down.
JSLock::JSLock(VM* vm)
    : m_ownerThreadID(std::thread::id()) // default-constructed id == "no owner"
    , m_lockCount(0)
    , m_lockDropDepth(0)
    , m_hasExclusiveThread(false)
    , m_vm(vm)
    , m_entryAtomicStringTable(nullptr)
{
}
89
JSLock::~JSLock()
{
    // Nothing to release explicitly: the member mutex cleans itself up and
    // m_vm is a non-owning back-pointer.
}
93
// Called when the VM is being destroyed. The JSLock is ref-counted and can
// outlive its VM, so null out the back-pointer; subsequent lock/unlock
// cycles skip the per-VM bookkeeping (see didAcquireLock/willReleaseLock).
void JSLock::willDestroyVM(VM* vm)
{
    ASSERT_UNUSED(vm, m_vm == vm);
    m_vm = nullptr;
}
99
100void JSLock::setExclusiveThread(std::thread::id threadId)
101{
102    RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
103    m_hasExclusiveThread = (threadId != std::thread::id());
104    m_ownerThreadID = threadId;
105}
106
// Acquire the lock once; recursive on the owning thread (see lock(intptr_t)).
void JSLock::lock()
{
    lock(1);
}
111
// Acquire the lock |lockCount| times in one step (used by grabAllLocks() to
// restore a dropped recursion depth).
void JSLock::lock(intptr_t lockCount)
{
    ASSERT(lockCount > 0);
    // Re-entrant acquisition on the owning thread: just bump the count.
    if (currentThreadIsHoldingLock()) {
        m_lockCount += lockCount;
        return;
    }

    if (!m_hasExclusiveThread) {
        // Acquire the mutex first; only then publish ourselves as owner so
        // other threads never observe a stale owner id.
        m_lock.lock();
        m_ownerThreadID = std::this_thread::get_id();
    }
    ASSERT(!m_lockCount);
    m_lockCount = lockCount;

    didAcquireLock();
}
129
// Install per-thread VM state now that this thread owns the API lock.
void JSLock::didAcquireLock()
{
    // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
    if (!m_vm)
        return;

    RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
    void* p = &p; // A proxy for the current stack pointer.
    m_vm->setStackPointerAtVMEntry(p);

    WTFThreadData& threadData = wtfThreadData();
    m_vm->setLastStackTop(threadData.savedLastStackTop());

    ASSERT(!m_entryAtomicStringTable);
    // Switch this thread onto the VM's atomic-string table, remembering the
    // previous table so willReleaseLock() can restore it.
    m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
    ASSERT(m_entryAtomicStringTable);

    // Register this thread with the heap — presumably so the conservative GC
    // can scan its stack for roots; confirm against MachineThreads.
    m_vm->heap.machineThreads().addCurrentThread();
}
149
// Release the lock once; the mutex is only released when the recursion
// count reaches zero (see unlock(intptr_t)).
void JSLock::unlock()
{
    unlock(1);
}
154
// Release the lock |unlockCount| times in one step (used by dropAllLocks()).
void JSLock::unlock(intptr_t unlockCount)
{
    RELEASE_ASSERT(currentThreadIsHoldingLock());
    ASSERT(m_lockCount >= unlockCount);

    m_lockCount -= unlockCount;

    if (!m_lockCount) {
        // Outermost release: tear down per-thread VM state before letting
        // another thread in.
        willReleaseLock();

        if (!m_hasExclusiveThread) {
            // Clear ownership before unlocking so a thread that acquires
            // m_lock never sees a stale owner id.
            m_ownerThreadID = std::thread::id();
            m_lock.unlock();
        }
    }
}
171
172void JSLock::willReleaseLock()
173{
174    if (m_vm)
175        m_vm->setStackPointerAtVMEntry(nullptr);
176
177    if (m_entryAtomicStringTable) {
178        wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
179        m_entryAtomicStringTable = nullptr;
180    }
181}
182
// Convenience overload: lock the API lock of the VM associated with |exec|.
void JSLock::lock(ExecState* exec)
{
    exec->vm().apiLock().lock();
}
187
// Convenience overload: unlock the API lock of the VM associated with |exec|.
void JSLock::unlock(ExecState* exec)
{
    exec->vm().apiLock().unlock();
}
192
193bool JSLock::currentThreadIsHoldingLock()
194{
195    ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
196    if (m_hasExclusiveThread)
197        return !!m_lockCount;
198    return m_ownerThreadID == std::this_thread::get_id();
199}
200
// Temporarily release the lock so other threads may enter the VM. Returns
// the recursion depth that was dropped; the caller must hand that count back
// to grabAllLocks() later. Returns 0 (and does nothing) when there is
// nothing to drop.
unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
{
    if (m_hasExclusiveThread) {
        // The lock can never migrate to another thread, so dropping it
        // would accomplish nothing.
        ASSERT(exclusiveThread() == std::this_thread::get_id());
        return 0;
    }

    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    if (!currentThreadIsHoldingLock())
        return 0;

    ++m_lockDropDepth;

    // Record our depth so grabAllLocks() can enforce LIFO reacquisition of
    // nested DropAllLocks scopes.
    dropper->setDropDepth(m_lockDropDepth);

    // Save the VM's stack bounds into thread-local storage; grabAllLocks()
    // restores them when the lock is reacquired on this thread.
    WTFThreadData& threadData = wtfThreadData();
    threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
    threadData.setSavedLastStackTop(m_vm->lastStackTop());

    unsigned droppedLockCount = m_lockCount;
    unlock(droppedLockCount);

    return droppedLockCount;
}
227
// Reacquire locks previously released by dropAllLocks(). |droppedLockCount|
// is dropAllLocks()'s return value; 0 means nothing was dropped.
void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
{
    ASSERT(!m_hasExclusiveThread || !droppedLockCount);

    // If no locks were dropped, nothing to do!
    if (!droppedLockCount)
        return;

    ASSERT(!currentThreadIsHoldingLock());
    lock(droppedLockCount);

    // Nested DropAllLocks scopes must reacquire in LIFO order: if an inner
    // dropper on another thread has not yet reacquired, back off (release,
    // yield, retry) until our depth is the innermost one.
    while (dropper->dropDepth() != m_lockDropDepth) {
        unlock(droppedLockCount);
        std::this_thread::yield();
        lock(droppedLockCount);
    }

    --m_lockDropDepth;

    // Restore the stack bounds this thread saved in dropAllLocks().
    WTFThreadData& threadData = wtfThreadData();
    m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
    m_vm->setLastStackTop(threadData.savedLastStackTop());
}
251
252JSLock::DropAllLocks::DropAllLocks(VM* vm)
253    : m_droppedLockCount(0)
254    // If the VM is in the middle of being destroyed then we don't want to resurrect it
255    // by allowing DropAllLocks to ref it. By this point the JSLock has already been
256    // released anyways, so it doesn't matter that DropAllLocks is a no-op.
257    , m_vm(vm->refCount() ? vm : nullptr)
258{
259    if (!m_vm)
260        return;
261    wtfThreadData().resetCurrentAtomicStringTable();
262    RELEASE_ASSERT(!m_vm->isCollectorBusy());
263    m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
264}
265
// Delegates to the VM* constructor; a null |exec| is forwarded as a null VM*.
// NOTE(review): the VM* overload calls vm->refCount() unconditionally, so a
// null exec would crash there — confirm callers never pass null, or add a
// null check in the delegate.
JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
    : DropAllLocks(exec ? &exec->vm() : nullptr)
{
}
270
// Reference overload; delegates to the VM* constructor with a non-null VM.
JSLock::DropAllLocks::DropAllLocks(VM& vm)
    : DropAllLocks(&vm)
{
}
275
JSLock::DropAllLocks::~DropAllLocks()
{
    // No-op when the constructor decided not to operate on the VM.
    if (!m_vm)
        return;
    // Reacquire exactly the locks we dropped, then reattach this thread to
    // the VM's atomic-string table (the constructor reset it).
    m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
    wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
}
283
284} // namespace JSC
285