/*
 * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedAllocator.h"

#include "DelayedReleaseScope.h"
#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
#include "JSCInlines.h"
#include "VM.h"
#include <wtf/CurrentTime.h>

namespace JSC {

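// Walks the given block list, consulting the clock only once every
// Heap::s_timeCheckResolution iterations to keep timing overhead low. If the
// walk fails to finish before the deadline, touching the block headers was
// presumably slow because they were paged out, so we report the list as such.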
static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
{
    unsigned itersSinceLastTimeCheck = 0;
    MarkedBlock* block = list.head();
    while (block) {
        block = block->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}

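// Returns true if walking this allocator's blocks took long enough to suggest
// they are no longer resident in memory.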
bool MarkedAllocator::isPagedOut(double deadline)
{
    return isListPagedOut(deadline, m_blockList);
}

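// Carves a cell out of the current free list, lazily sweeping blocks to
// refill it when it runs dry. Blocks whose post-sweep utilization is at or
// above Options::minMarkedBlockUtilization() are retired onto m_retiredBlocks
// so future sweeps skip them. Returns 0 if no block can satisfy the request.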
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
    // We need a while loop to check the free list because the DelayedReleaseScope
    // could cause arbitrary code to execute and exhaust the free list that we
    // thought had elements in it.
    while (!m_freeList.head) {
        DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
        if (m_currentBlock) {
            ASSERT(m_currentBlock == m_nextBlockToSweep);
            m_currentBlock->didConsumeFreeList();
            m_nextBlockToSweep = m_currentBlock->next();
        }

        MarkedBlock* next;
        for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
            next = block->next();

            MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

            // Fraction of the block still occupied by live cells after the sweep.
            double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
            if (utilization >= Options::minMarkedBlockUtilization()) {
                ASSERT(freeList.bytes || !freeList.head);
                m_blockList.remove(block);
                m_retiredBlocks.push(block);
                block->didRetireBlock(freeList);
                continue;
            }

            // This block's cells are too small for the current request; hand its
            // free list back and keep looking.
            if (bytes > block->cellSize()) {
                block->stopAllocating(freeList);
                continue;
            }

            m_currentBlock = block;
            m_freeList = freeList;
            break;
        }

        if (!m_freeList.head) {
            m_currentBlock = 0;
            return 0;
        }
    }

    ASSERT(m_freeList.head);
    void* head = tryPopFreeList(bytes);
    ASSERT(head);
    m_markedSpace->didAllocateInBlock(m_currentBlock);
    return head;
}

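// Pops a single cell off the current free list, or returns 0 if the current
// block's cells are too small for this request.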
inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
{
    ASSERT(m_currentBlock);
    if (bytes > m_currentBlock->cellSize())
        return 0;

    MarkedBlock::FreeCell* head = m_freeList.head;
    m_freeList.head = head->next;
    return head;
}

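// Brackets the allocation attempt with the heap's operation-in-progress
// state. The retry loop below exists because the DelayedReleaseScope inside
// tryAllocateHelper can run arbitrary code, giving other threads a window in
// which to install a new current block.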
inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
    ASSERT(!m_heap->isBusy());
    m_heap->m_operationInProgress = Allocation;
    void* result = tryAllocateHelper(bytes);

    // Due to the DelayedReleaseScope in tryAllocateHelper, some other thread might have
    // created a new block after we thought we didn't find any free cells.
    while (!result && m_currentBlock) {
        // A new block was added by another thread so try popping the free list.
        result = tryPopFreeList(bytes);
        if (result)
            break;
        // The free list was empty, so call tryAllocateHelper to do the normal sweeping stuff.
        result = tryAllocateHelper(bytes);
    }

    m_heap->m_operationInProgress = NoOperation;
    ASSERT(result || !m_currentBlock);
    return result;
}

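// Debugging aid: when Options::slowPathAllocsBetweenGCs() is nonzero, force a
// full collection every N slow-path allocations so that GC bugs reproduce
// more deterministically.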
ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
{
    if (!Options::slowPathAllocsBetweenGCs())
        return;

    static unsigned allocationCount = 0;
    if (!allocationCount) {
        if (!m_heap->isDeferred())
            m_heap->collectAllGarbage();
        ASSERT(m_heap->m_operationInProgress == NoOperation);
    }
    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
        allocationCount = 0;
}

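// Slow path taken when the inline fast path finds an empty free list. We try,
// in order: sweeping existing blocks, collecting if the heap thinks it is
// worthwhile, and finally allocating a brand new block, which must succeed.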
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    // The free list is empty, but m_freeList.bytes still records the size of
    // the free list we just consumed; credit it to the heap's allocation
    // accounting before moving on.
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}

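// Allocates a fresh block for this allocator. Ordinary requests are served by
// a standard-size block from the shared BlockAllocator; a request too large
// for that gets a custom-size block, rounded up to a whole number of pages.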
MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
{
    size_t minBlockSize = MarkedBlock::blockSize;
    size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
    size_t blockSize = std::max(minBlockSize, minAllocationSize);

    size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);

    if (blockSize == MarkedBlock::blockSize)
        return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
    return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
}

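// Links a freshly allocated block into this allocator and makes it the next
// sweep candidate, so the allocation that triggered its creation can use it
// immediately.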
void MarkedAllocator::addBlock(MarkedBlock* block)
{
    ASSERT(!m_currentBlock);
    ASSERT(!m_freeList.head);

    m_blockList.append(block);
    m_nextBlockToSweep = block;
    m_markedSpace->didAddBlock(block);
}

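// Detaches a block from this allocator, first stepping any cursor
// (m_currentBlock, m_nextBlockToSweep) off of it.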
void MarkedAllocator::removeBlock(MarkedBlock* block)
{
    if (m_currentBlock == block) {
        m_currentBlock = m_currentBlock->next();
        m_freeList = MarkedBlock::FreeList();
    }
    if (m_nextBlockToSweep == block)
        m_nextBlockToSweep = m_nextBlockToSweep->next();

    block->willRemoveBlock();
    m_blockList.remove(block);
}

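// Called when a collection finishes. Retired blocks rejoin the sweep list
// only after a full collection; an eden collection leaves their marks intact,
// so they would still look fully utilized.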
void MarkedAllocator::reset()
{
    m_lastActiveBlock = 0;
    m_currentBlock = 0;
    m_freeList = MarkedBlock::FreeList();
    if (m_heap->operationInProgress() == FullCollection)
        m_blockList.append(m_retiredBlocks);

    m_nextBlockToSweep = m_blockList.head();
}

struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
};

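// Shutdown path: retired blocks rejoin the list so that every block,
// including the ones we stopped sweeping, gets a final chance to run
// destructors.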
void MarkedAllocator::lastChanceToFinalize()
{
    m_blockList.append(m_retiredBlocks);
    LastChanceToFinalize functor;
    forEachBlock(functor);
}

} // namespace JSC