/*
 * Copyright (C) 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "BoundaryTagInlines.h"
#include "Heap.h"
#include "LargeChunk.h"
#include "Line.h"
#include "MediumChunk.h"
#include "Page.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include "XLargeChunk.h"
#include <chrono>
#include <thread>

namespace bmalloc {

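// Releases the heap lock while sleeping so other threads can allocate and
// deallocate in the meantime, then reacquires it before returning.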
static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
{
    if (duration == std::chrono::milliseconds(0))
        return;

    lock.unlock();
    std::this_thread::sleep_for(duration);
    lock.lock();
}

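// The lock_guard parameter attests that the caller holds the heap lock;
// the Heap singleton is constructed lazily through PerProcess<Heap>.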
Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
}

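// Runs on the scavenger thread: takes the heap lock and returns free memory
// to the OS, pausing for scavengeSleepDuration between batches.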
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

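// Returns cached small pages, medium pages, and large ranges to the OS.
// Each phase backs off whenever the application is actively allocating.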
void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    scavengeSmallPages(lock, sleepDuration);
    scavengeMediumPages(lock, sleepDuration);
    scavengeLargeRanges(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

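// Pops free small pages and returns them to the VM heap. If the application
// allocated since the last iteration, m_isAllocatingPages is set; in that
// case the scavenger clears the flag and sleeps rather than competing with
// the allocator for memory.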
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        if (!m_smallPages.size())
            return;
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
    }
}

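// Same back-off policy as scavengeSmallPages, applied to free medium pages.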
void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        if (!m_mediumPages.size())
            return;
        m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
    }
}

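// Same back-off policy again, applied to free large ranges. takeGreedy only
// takes ranges of at least one VM page, since anything smaller can't be
// returned to the OS.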
void Heap::scavengeLargeRanges(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        Range range = m_largeRanges.takeGreedy(vmPageSize);
        if (!range)
            return;
        m_vmHeap.deallocateLargeRange(lock, range);
    }
}

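// Slow path when the line cache for smallSizeClass is empty: takes a free
// small page (allocating a fresh one from the VM heap if needed), seeds the
// size class's line cache with the page's remaining lines, and returns the
// page's first line.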
SmallLine* Heap::allocateSmallLineSlowCase(std::lock_guard<StaticMutex>& lock, size_t smallSizeClass)
{
    m_isAllocatingPages = true;

    SmallPage* page = [this]() {
        if (m_smallPages.size())
            return m_smallPages.pop();

        SmallPage* page = m_vmHeap.allocateSmallPage();
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
        return page;
    }();

    SmallLine* line = page->begin();
    Vector<SmallLine*>& smallLines = m_smallLines[smallSizeClass];
    for (auto it = line + 1; it != page->end(); ++it)
        smallLines.push(it);

    BASSERT(!line->refCount(lock));
    page->setSmallSizeClass(smallSizeClass);
    page->ref(lock);
    return line;
}

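// Medium analogue of allocateSmallLineSlowCase; medium lines share a single
// cache rather than per-size-class caches.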
MediumLine* Heap::allocateMediumLineSlowCase(std::lock_guard<StaticMutex>& lock)
{
    m_isAllocatingPages = true;

    MediumPage* page = [this]() {
        if (m_mediumPages.size())
            return m_mediumPages.pop();

        MediumPage* page = m_vmHeap.allocateMediumPage();
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
        return page;
    }();

    MediumLine* line = page->begin();
    for (auto it = line + 1; it != page->end(); ++it)
        m_mediumLines.push(it);

    page->ref(lock);
    return line;
}

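// XLarge objects each get a dedicated chunk. The chunk's boundary tag is
// marked XLarge so the deallocation path can tell it apart from an ordinary
// large object.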
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>&, size_t size)
{
    XLargeChunk* chunk = XLargeChunk::create(size);

    BeginTag* beginTag = LargeChunk::beginTag(chunk->begin());
    beginTag->setXLarge();
    beginTag->setFree(false);
    beginTag->setHasPhysicalPages(true);

    return chunk->begin();
}

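// XLarge chunks aren't kept on a free list; deallocation destroys the chunk
// outright.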
void Heap::deallocateXLarge(std::lock_guard<StaticMutex>&, void* object)
{
    XLargeChunk* chunk = XLargeChunk::get(object);
    XLargeChunk::destroy(chunk);
}

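// Allocates from the free list of large ranges, falling back to the VM heap.
// Leftover space from the chosen range goes back on the free list, and
// physical pages are committed if the range had previously been scavenged.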
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);

    m_isAllocatingPages = true;

    Range range = m_largeRanges.take(size);
    if (!range)
        range = m_vmHeap.allocateLargeRange(size);

    Range leftover;
    bool hasPhysicalPages;
    BoundaryTag::allocate(size, range, leftover, hasPhysicalPages);

    if (!!leftover)
        m_largeRanges.insert(leftover);

    if (!hasPhysicalPages)
        vmAllocatePhysicalPagesSloppy(range.begin(), range.size());

    return range.begin();
}

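// Returns the range to the free list and schedules the scavenger, which will
// eventually return the memory to the OS if it stays unused.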
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object)
{
    Range range = BoundaryTag::deallocate(object);
    m_largeRanges.insert(range);
    m_scavenger.run();
}

} // namespace bmalloc