/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CopiedSpace.h"

#include "CopiedSpaceInlines.h"
#include "GCActivityCallback.h"
#include "JSCInlines.h"
#include "Options.h"

namespace JSC {

CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_inCopyingPhase(false)
    , m_shouldDoCopyPhase(false)
    , m_numberOfLoanedBlocks(0)
{
    m_toSpaceLock.Init();
}

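// Tear down both generations: every to-space, from-space, and oversize block is handed
// back to the block allocator, with oversize blocks going through the custom-size
// deallocation path.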
CopiedSpace::~CopiedSpace()
{
    while (!m_oldGen.toSpace->isEmpty())
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_oldGen.toSpace->removeHead()));

    while (!m_oldGen.fromSpace->isEmpty())
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_oldGen.fromSpace->removeHead()));

    while (!m_oldGen.oversizeBlocks.isEmpty())
        m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oldGen.oversizeBlocks.removeHead()));

    while (!m_newGen.toSpace->isEmpty())
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_newGen.toSpace->removeHead()));

    while (!m_newGen.fromSpace->isEmpty())
        m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_newGen.fromSpace->removeHead()));

    while (!m_newGen.oversizeBlocks.isEmpty())
        m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_newGen.oversizeBlocks.removeHead()));

    ASSERT(m_oldGen.toSpace->isEmpty());
    ASSERT(m_oldGen.fromSpace->isEmpty());
    ASSERT(m_oldGen.oversizeBlocks.isEmpty());
    ASSERT(m_newGen.toSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());
    ASSERT(m_newGen.oversizeBlocks.isEmpty());
}

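// Point each generation's to-space and from-space at its two backing block lists,
// then allocate the first block.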
void CopiedSpace::init()
{
    m_oldGen.toSpace = &m_oldGen.blocks1;
    m_oldGen.fromSpace = &m_oldGen.blocks2;

    m_newGen.toSpace = &m_newGen.blocks1;
    m_newGen.fromSpace = &m_newGen.blocks2;

    allocateBlock();
}

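// Slow path for tryAllocate: oversize requests are redirected to their own path;
// otherwise the current block could not satisfy the request, so report its capacity
// to the heap, start a fresh block, and bump-allocate the request from it.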
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);

    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    m_heap->didAllocate(m_allocator.currentCapacity());

    allocateBlock();

    *outPtr = m_allocator.forceAllocate(bytes);
    return true;
}

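// Oversize allocations get a dedicated custom-sized block, which is registered with
// the new generation's oversize list, the block filter, and the global block set.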
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));

    CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize));
    m_newGen.oversizeBlocks.push(block);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    ASSERT(!block->isOld());

    CopiedAllocator allocator;
    allocator.setCurrentBlock(block);
    *outPtr = allocator.forceAllocate(bytes);
    allocator.resetCurrentBlock();

    m_heap->didAllocate(block->region()->blockSize());

    return true;
}

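// Grow-only reallocation: shrinking is a no-op. Try to extend the allocation in place
// first; if that fails, allocate a new chunk and copy the old contents into it.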
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    if (oldSize >= newSize)
        return true;

    void* oldPtr = *ptr;
    ASSERT(!m_heap->vm()->isInitializingObject());

    if (CopiedSpace::blockFor(oldPtr)->isOversize() || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    if (m_allocator.tryReallocate(oldPtr, oldSize, newSize))
        return true;

    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}

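// Move the contents into a freshly allocated oversize block, then, if the old block
// was itself oversize, unlink it and return it to the block allocator.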
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* oldPtr = *ptr;

    void* newPtr = 0;
    if (!tryAllocateOversize(newSize, &newPtr)) {
        *ptr = 0;
        return false;
    }

    memcpy(newPtr, oldPtr, oldSize);

    CopiedBlock* oldBlock = CopiedSpace::blockFor(oldPtr);
    if (oldBlock->isOversize()) {
        if (oldBlock->isOld())
            m_oldGen.oversizeBlocks.remove(oldBlock);
        else
            m_newGen.oversizeBlocks.remove(oldBlock);
        m_blockSet.remove(oldBlock);
        m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(oldBlock));
    }

    *ptr = newPtr;
    return true;
}

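// Called during the copying phase when a borrowed block has been filled: optionally
// hand out a replacement block, recycle the block if it ended up empty, and otherwise
// promote it into old-gen to-space before dropping the loaned-block count.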
void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
{
    ASSERT(m_inCopyingPhase);

    if (exchange)
        *exchange = allocateBlockForCopyingPhase();

    if (!block)
        return;

    if (!block->dataSize()) {
        recycleBorrowedBlock(block);
        return;
    }

    block->zeroFillWilderness();

    {
        // Always put the block into the old gen because it's being promoted!
        SpinLockHolder locker(&m_toSpaceLock);
        m_oldGen.toSpace->push(block);
        m_blockSet.add(block);
        m_oldGen.blockFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        ASSERT(m_inCopyingPhase);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}

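// At the start of a full collection both from-spaces must be empty and no
// new-generation block should have live bytes recorded; every block already in the
// old generation has didSurviveGC() called on it.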
void CopiedSpace::didStartFullCollection()
{
    ASSERT(heap()->operationInProgress() == FullCollection);
    ASSERT(m_oldGen.fromSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());

#ifndef NDEBUG
    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        ASSERT(!block->liveBytes());

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        ASSERT(!block->liveBytes());
#endif

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        block->didSurviveGC();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        block->didSurviveGC();
}

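// End of the copying phase: wait for all loaned blocks to be returned, move the
// surviving from-space blocks of the collected generation back into its to-space,
// and, after an Eden collection, promote the entire new generation into the old
// generation before starting a fresh allocation block.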
void CopiedSpace::doneCopying()
{
    {
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
    m_inCopyingPhase = false;

    DoublyLinkedList<CopiedBlock>* toSpace;
    DoublyLinkedList<CopiedBlock>* fromSpace;
    TinyBloomFilter* blockFilter;
    if (heap()->operationInProgress() == FullCollection) {
        toSpace = m_oldGen.toSpace;
        fromSpace = m_oldGen.fromSpace;
        blockFilter = &m_oldGen.blockFilter;
    } else {
        toSpace = m_newGen.toSpace;
        fromSpace = m_newGen.fromSpace;
        blockFilter = &m_newGen.blockFilter;
    }

    while (!fromSpace->isEmpty()) {
        CopiedBlock* block = fromSpace->removeHead();
        // We don't add the block to the blockSet because it was never removed.
        ASSERT(m_blockSet.contains(block));
        blockFilter->add(reinterpret_cast<Bits>(block));
        block->didSurviveGC();
        toSpace->push(block);
    }

    if (heap()->operationInProgress() == EdenCollection) {
        m_oldGen.toSpace->append(*m_newGen.toSpace);
        m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
        m_oldGen.blockFilter.add(m_newGen.blockFilter);
        m_newGen.blockFilter.reset();
    }

    ASSERT(m_newGen.toSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());
    ASSERT(m_newGen.oversizeBlocks.isEmpty());

    allocateBlock();

    m_shouldDoCopyPhase = false;
}

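// Bytes currently in use, summed over every block list in both generations.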
size_t CopiedSpace::size()
{
    size_t calculatedSize = 0;

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        calculatedSize += block->size();

    return calculatedSize;
}

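// Bytes reserved, summed over every block list in both generations.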
size_t CopiedSpace::capacity()
{
    size_t calculatedCapacity = 0;

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    return calculatedCapacity;
}

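// Walk a block list, checking the clock every s_timeCheckResolution steps; if the
// traversal runs past the deadline, treat the list as paged out.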
static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>* list)
{
    unsigned itersSinceLastTimeCheck = 0;
    CopiedBlock* current = list->head();
    while (current) {
        current = current->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }

    return false;
}

bool CopiedSpace::isPagedOut(double deadline)
{
    return isBlockListPagedOut(deadline, m_oldGen.toSpace)
        || isBlockListPagedOut(deadline, m_oldGen.fromSpace)
        || isBlockListPagedOut(deadline, &m_oldGen.oversizeBlocks)
        || isBlockListPagedOut(deadline, m_newGen.toSpace)
        || isBlockListPagedOut(deadline, m_newGen.fromSpace)
        || isBlockListPagedOut(deadline, &m_newGen.oversizeBlocks);
}

} // namespace JSC