/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "Operations.h"

namespace JSC {

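// A MarkedBlock is constructed in place over the storage of a recycled
// DeadBlock via placement new, so creating a block allocates no new memory.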
MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}

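// m_endAtom is one past the last atom at which a cell of this size can begin.
// An allocator with a nonzero cellSize() uses fixed-capacity blocks of
// atomsPerBlock atoms; a cellSize() of 0 indicates a variable-sized (oversize)
// block, whose capacity is derived from the region's actual block size.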
MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run the cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

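    // Destroy the cell through its method table, then zap it so a later sweep
    // (see isZapped() above) will not run the destructor a second time.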
    cell->methodTableForDestruction()->destroy(cell);
    cell->zap();
}

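// blockState, sweepMode, and dtorType are template parameters so that each
// combination compiles to its own specialized loop; the branches on them in
// the body below are resolved at compile time rather than per cell.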
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
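        // In a Marked block a cell is live if its mark bit is set, or if it
        // was allocated after the last mark phase (recorded in m_newlyAllocated).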
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a
    // FreeList; otherwise we would lose information about what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

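// Dispatches at runtime on the destructor type to select the statically
// specialized sweep. Weak references are always swept, but visiting the cells
// can be skipped when no free list is needed and no destructors have to run.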
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}

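// Selects the sweep specialization matching the block's current state.
// Sweeping an Allocated block is invalid: it must go through a mark phase
// (becoming Marked) before it can be swept again, hence the assertion.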
template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

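// Used by canonicalizeCellLivenessData() below: sets the newly-allocated bit
// for every cell in the block; the bits for cells still sitting on the free
// list are cleared afterwards, leaving only the live cells flagged.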
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};

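// Restores coherent liveness data before the Heap is introspected: a
// FreeListed block is rolled back to Marked, with cells that lack mark bits
// tracked via m_newlyAllocated instead.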
void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

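    // Cells still on the free list were never handed out to the program, so
    // they are dead: zap them and clear their newly-allocated bits.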
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

} // namespace JSC