/*
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef BumpPointerAllocator_h
#define BumpPointerAllocator_h

#include <algorithm>
#include <wtf/PageAllocation.h>
#include <wtf/PageBlock.h>

namespace WTF {

#define MINIMUM_BUMP_POOL_SIZE 0x1000

class BumpPointerPool {
public:
    // ensureCapacity will check whether the current pool has capacity to
    // allocate 'size' bytes of memory.  If it does not, it will attempt to
    // allocate a new pool (which will be added to this one in a chain).
    //
    // If allocation fails (out of memory) this method will return null.
    // If the return value is non-null, then callers should update any
    // references they have to this current (possibly full) BumpPointerPool
    // to instead point to the newly returned BumpPointerPool.
    BumpPointerPool* ensureCapacity(size_t size)
    {
        void* allocationEnd = static_cast<char*>(m_current) + size;
        ASSERT_WITH_SECURITY_IMPLICATION(allocationEnd > m_current); // check for overflow
        if (allocationEnd <= static_cast<void*>(this))
            return this;
        return ensureCapacityCrossPool(this, size);
    }

    // alloc should only be called after calling ensureCapacity; as such
    // alloc will never fail.
    void* alloc(size_t size)
    {
        void* current = m_current;
        void* allocationEnd = static_cast<char*>(current) + size;
        ASSERT_WITH_SECURITY_IMPLICATION(allocationEnd > current); // check for overflow
        ASSERT(allocationEnd <= static_cast<void*>(this));
        m_current = allocationEnd;
        return current;
    }
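
    // Illustrative usage sketch ('pool' and 'Node' are hypothetical caller-side
    // names, not part of the API).  The caller re-seats its pool pointer to the
    // value returned by ensureCapacity, and only then calls alloc, which cannot
    // fail:
    //
    //     pool = pool->ensureCapacity(sizeof(Node));
    //     if (!pool)
    //         return 0; // out of memory
    //     Node* node = static_cast<Node*>(pool->alloc(sizeof(Node)));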

    // The dealloc method releases memory allocated using alloc.  Memory
    // must be released in a LIFO fashion, e.g. if the client calls alloc
    // four times, returning pointers A, B, C, D, then the only valid order
    // in which these may be deallocated is D, C, B, A.
    //
    // The client may optionally skip some deallocations.  In the example
    // above, it would be valid to only explicitly dealloc C, A (D being
    // deallocated along with C, B along with A).
    //
    // If the pointer was not allocated from this pool (or pools) then dealloc
    // will CRASH().  Callers should update any references they have to
    // this current BumpPointerPool to instead point to the returned
    // BumpPointerPool.
    BumpPointerPool* dealloc(void* position)
    {
        if ((position >= m_start) && (position <= static_cast<void*>(this))) {
            ASSERT(position <= m_current);
            m_current = position;
            return this;
        }
        return deallocCrossPool(this, position);
    }
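
    // Illustrative sketch of the LIFO contract described above ('pool', a, b,
    // c, d are hypothetical; ensureCapacity calls omitted for brevity):
    //
    //     void* a = pool->alloc(8);
    //     void* b = pool->alloc(8);
    //     void* c = pool->alloc(8);
    //     void* d = pool->alloc(8);
    //     pool = pool->dealloc(c); // releases c and d
    //     pool = pool->dealloc(a); // releases a and b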

private:
    // Placement operator new, returns the last 'size' bytes of allocation for use as this.
    void* operator new(size_t size, const PageAllocation& allocation)
    {
        ASSERT(size < allocation.size());
        return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
    }
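
    // Layout note: the BumpPointerPool object therefore sits in the last bytes
    // of its page allocation, so [m_start, this) is the usable bump region and
    // 'this' doubles as the capacity limit checked in ensureCapacity/alloc above.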

    BumpPointerPool(const PageAllocation& allocation)
        : m_current(allocation.base())
        , m_start(allocation.base())
        , m_next(0)
        , m_previous(0)
        , m_allocation(allocation)
    {
    }

    static BumpPointerPool* create(size_t minimumCapacity = 0)
    {
        // Add size of BumpPointerPool object, check for overflow.
        minimumCapacity += sizeof(BumpPointerPool);
        if (minimumCapacity < sizeof(BumpPointerPool))
            return 0;

        size_t poolSize = std::max(static_cast<size_t>(MINIMUM_BUMP_POOL_SIZE), WTF::pageSize());
        while (poolSize < minimumCapacity) {
            poolSize <<= 1;
            // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
            ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
            if (!poolSize)
                return 0;
        }
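
        // Worked example (assuming a 4KB page size): a request for 10000 bytes
        // of capacity becomes 10000 + sizeof(BumpPointerPool) bytes, and
        // poolSize doubles 4KB -> 8KB -> 16KB, so a 16KB pool is allocated.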

        PageAllocation allocation = PageAllocation::allocate(poolSize);
        if (!!allocation)
            return new (allocation) BumpPointerPool(allocation);
        return 0;
    }

    void shrink()
    {
        ASSERT(!m_previous);
        m_current = m_start;
        while (m_next) {
            BumpPointerPool* nextNext = m_next->m_next;
            m_next->destroy();
            m_next = nextNext;
        }
    }

    void destroy()
    {
        m_allocation.deallocate();
    }

    static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
    {
        // The pool passed should not have capacity, so we'll start with the next one.
        ASSERT(previousPool);
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
        BumpPointerPool* pool = previousPool->m_next;

        while (true) {
            if (!pool) {
                // We've run to the end; allocate a new pool.
                pool = BumpPointerPool::create(size);
                previousPool->m_next = pool;
                if (!pool)
                    return 0;
                pool->m_previous = previousPool;
                return pool;
            }

            // Check whether this existing pool has capacity to allocate 'size' bytes.
            void* current = pool->m_current;
            void* allocationEnd = static_cast<char*>(current) + size;
            ASSERT_WITH_SECURITY_IMPLICATION(allocationEnd > current); // check for overflow
            if (allocationEnd <= static_cast<void*>(pool))
                return pool;
        }
    }

    static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
    {
        // Should only be called if position is not in the current pool.
        ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));

        while (true) {
            // Unwind the current pool to the start, move back in the chain to the previous pool.
            pool->m_current = pool->m_start;
            pool = pool->m_previous;

            // position was nowhere in the chain!
            if (!pool)
                CRASH();

            if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
                ASSERT(position <= pool->m_current);
                pool->m_current = position;
                return pool;
            }
        }
    }

    void* m_current;
    void* m_start;
    BumpPointerPool* m_next;
    BumpPointerPool* m_previous;
    PageAllocation m_allocation;

    friend class BumpPointerAllocator;
};

// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
// can be used for LIFO (stack-like) allocation.
//
// To begin allocating using this class call startAllocator().  The result
// of this method will be null if the initial pool allocation fails, or a
// pointer to a BumpPointerPool object that can be used to perform
// allocations.  Whilst running, no memory will be released until
// stopAllocator() is called.  At this point all allocations made through
// this allocator will be reaped, and underlying memory may be freed.
//
// (In practice we will still hold on to the initial pool to allow allocation
// to be quickly restarted, but additional pools will be freed.)
//
// This allocator is non-reentrant; it is incumbent on clients to ensure
// startAllocator() is not called again until stopAllocator() has been called.
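//
// Illustrative usage sketch (variable names are hypothetical, not part of
// the API):
//
//     BumpPointerAllocator allocator;
//     BumpPointerPool* pool = allocator.startAllocator();
//     if (pool) {
//         pool = pool->ensureCapacity(64);
//         void* p = pool ? pool->alloc(64) : 0;
//         // ... use p (valid until stopAllocator()) ...
//     }
//     allocator.stopAllocator(); // all allocations are reaped here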
class BumpPointerAllocator {
public:
    BumpPointerAllocator()
        : m_head(0)
    {
    }

    ~BumpPointerAllocator()
    {
        if (m_head)
            m_head->destroy();
    }

    BumpPointerPool* startAllocator()
    {
        if (!m_head)
            m_head = BumpPointerPool::create();
        return m_head;
    }

    void stopAllocator()
    {
        if (m_head)
            m_head->shrink();
    }

private:
    BumpPointerPool* m_head;
};

} // namespace WTF

using WTF::BumpPointerAllocator;

#endif // BumpPointerAllocator_h