/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_general.h"
#include "apr_rmm.h"
#include "apr_errno.h"
#include "apr_lib.h"
#include "apr_strings.h"

/* The RMM region is made up of two doubly-linked lists of blocks: the
 * list of used blocks and the list of free blocks (either list may be
 * empty).  The base pointer, rmm->base, points at the beginning of
 * the shmem region in use.  Each block is addressable by an
 * apr_rmm_off_t value, which represents the offset from the base
 * pointer.  The term "address" is used here to mean such a value; an
 * "offset from rmm->base".
 *
 * The RMM region contains exactly one "rmm_hdr_block_t" structure,
 * the "header block", which is always stored at the base pointer.
 * The firstused field in this structure is the address of the first
 * block in the "used blocks" list; the firstfree field is the address
 * of the first block in the "free blocks" list.
 *
 * Each block is prefixed by an "rmm_block_t" structure, followed by
 * the caller-usable region represented by the block.  The next and
 * prev fields of the structure are zero if the block is at the end or
 * beginning of the linked list respectively, or otherwise hold the
 * address of the next and previous blocks in the list.  ("Address 0",
 * i.e. rmm->base, is *not* a valid address for a block, since the
 * header block is always stored at that address.)
 *
 * At creation, the RMM region is initialized to hold a single block
 * on the free list representing the entire available shm segment
 * (minus the header block); subsequent allocation and deallocation of
 * blocks involves splitting blocks and coalescing adjacent blocks,
 * and switching them between the free and used lists as
 * appropriate. */
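
/* A minimal usage sketch (illustrative only, not part of this file;
 * error checking is omitted, "pool" is assumed to be an existing APR
 * pool, and the file name "/tmp/example-shm" and the sizes are made
 * up for the example):
 *
 *     apr_shm_t *shm;
 *     apr_rmm_t *rmm;
 *     apr_rmm_off_t off;
 *     apr_size_t size = apr_rmm_overhead_get(16) + 16 * 128;
 *
 *     apr_shm_create(&shm, size, "/tmp/example-shm", pool);
 *     apr_rmm_init(&rmm, NULL, apr_shm_baseaddr_get(shm), size, pool);
 *
 *     off = apr_rmm_malloc(rmm, 128);      (offset of the block, 0 on failure)
 *     if (off) {
 *         char *data = apr_rmm_addr_get(rmm, off);
 *         memset(data, 0, 128);
 *         apr_rmm_free(rmm, off);
 *     }
 *
 * Offsets, not pointers, are what callers should store inside the
 * segment, since other processes may map the segment at a different
 * base address. */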

typedef struct rmm_block_t {
    apr_size_t size;
    apr_rmm_off_t prev;
    apr_rmm_off_t next;
} rmm_block_t;

/* Always at our apr_rmm_off(0):
 */
typedef struct rmm_hdr_block_t {
    apr_size_t abssize;
    apr_rmm_off_t /* rmm_block_t */ firstused;
    apr_rmm_off_t /* rmm_block_t */ firstfree;
} rmm_hdr_block_t;

#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))

struct apr_rmm_t {
    apr_pool_t *p;
    rmm_hdr_block_t *base;
    apr_size_t size;
    apr_anylock_t lock;
};

static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
                                          apr_rmm_off_t find, int includes)
{
    apr_rmm_off_t prev = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (find == next)
            return next;

        /* Overshot? */
        if (find < next)
            return includes ? prev : 0;

        prev = next;
        next = blk->next;
    }
    return includes ? prev : 0;
}

static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
{
    apr_rmm_off_t next = rmm->base->firstfree;
    apr_rmm_off_t best = 0;
    apr_rmm_off_t bestsize = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (blk->size == size)
            return next;

        if (blk->size >= size) {
            /* XXX: suboptimal algorithm
             * We need the most thorough best-fit logic, since we can
             * never grow our rmm, and we are out of luck once we hit
             * the wall.
             */
            if (!bestsize || (blk->size < bestsize)) {
                bestsize = blk->size;
                best = next;
            }
        }

        next = blk->next;
    }

    if (bestsize > RMM_BLOCK_SIZE + size) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
        struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);

        /* Split the best-fit block: the first "size" bytes stay on the
         * free list as the allocation candidate, and the remainder
         * becomes a new free block immediately after it. */
        new->size = blk->size - size;
        new->next = blk->next;
        new->prev = best;

        blk->size = size;
        blk->next = best + size;

        if (new->next) {
            blk = (rmm_block_t*)((char*)rmm->base + new->next);
            blk->prev = best + size;
        }
    }

    return best;
}

static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
{
    struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);

    /* close the gap */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        prev->next = blk->next;
    }
    else {
        if (free) {
            rmm->base->firstused = blk->next;
        }
        else {
            rmm->base->firstfree = blk->next;
        }
    }
    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        next->prev = blk->prev;
    }

    /* now find it in the other list, pushing it to the head if required */
    if (free) {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstfree;
            rmm->base->firstfree = this;
        }
    }
    else {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstused;
            rmm->base->firstused = this;
        }
    }

    /* and open it up */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (free && (blk->prev + prev->size == this)) {
            /* Collapse us into our predecessor */
            prev->size += blk->size;
            this = blk->prev;
            blk = prev;
        }
        else {
            blk->next = prev->next;
            prev->next = this;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (free && (this + blk->size == blk->next)) {
            /* Collapse us into our successor */
            blk->size += next->size;
            blk->next = next->next;
            if (blk->next) {
                next = (rmm_block_t*)((char*)rmm->base + blk->next);
                next->prev = this;
            }
        }
        else {
            next->prev = this;
        }
    }
}

APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
                                       void *base, apr_size_t size,
                                       apr_pool_t *p)
{
    apr_status_t rv;
    rmm_block_t *blk;
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }
    if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
        return rv;

    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = size;
    (*rmm)->lock = *lock;

    (*rmm)->base->abssize = size;
    (*rmm)->base->firstused = 0;
    (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;

    blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);

    blk->size = size - (*rmm)->base->firstfree;
    blk->prev = 0;
    blk->next = 0;

    return APR_ANYLOCK_UNLOCK(lock);
}
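
/* A worked example of the initial layout (figures assume an 8-byte
 * default alignment and 24-byte aligned header and block structures,
 * as on a typical LP64 build; they vary by platform): after
 * apr_rmm_init over a 1024-byte segment, firstused == 0, firstfree ==
 * RMM_HDR_BLOCK_SIZE == 24, and the free list holds a single block of
 * size 1024 - 24 == 1000 bytes, of which the first RMM_BLOCK_SIZE
 * bytes are that block's header. */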

APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
{
    apr_status_t rv;
    rmm_block_t *blk;

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    /* Blast it all --- no going back :) */
    if (rmm->base->firstused) {
        apr_rmm_off_t this = rmm->base->firstused;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstused = 0;
    }
    if (rmm->base->firstfree) {
        apr_rmm_off_t this = rmm->base->firstfree;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstfree = 0;
    }
    rmm->base->abssize = 0;
    rmm->size = 0;

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
                                         void *base, apr_pool_t *p)
{
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }

    /* a sanity check of the header block would be good here */
    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = (*rmm)->base->abssize;
    (*rmm)->lock = *lock;
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
{
    /* A no-op until we introduce locking/refcounts */
    return APR_SUCCESS;
}
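
/* A sketch of attaching from a second process (illustrative only;
 * error checking is omitted, "pool" is assumed to exist, and the file
 * name must match the one passed to apr_shm_create in the creating
 * process):
 *
 *     apr_shm_t *shm;
 *     apr_rmm_t *rmm;
 *
 *     apr_shm_attach(&shm, "/tmp/example-shm", pool);
 *     apr_rmm_attach(&rmm, NULL, apr_shm_baseaddr_get(shm), pool);
 *
 * Any apr_rmm_off_t handed over from the creating process (via the
 * segment itself, a pipe, etc.) can now be resolved locally with
 * apr_rmm_addr_get, even though the two processes may map the segment
 * at different base addresses. */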

APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_size_t size;
    apr_rmm_off_t this;

    size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
    /* reject requests whose aligned, padded size overflows */
    if (size < reqsize) {
        return 0;
    }

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, size);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_size_t size;
    apr_rmm_off_t this;

    size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
    /* reject requests whose aligned, padded size overflows */
    if (size < reqsize) {
        return 0;
    }

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, size);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
        /* zero only the caller-usable region, not the block header */
        memset((char*)rmm->base + this, 0, size - RMM_BLOCK_SIZE);
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
                                           apr_size_t reqsize)
{
    apr_rmm_off_t this;
    apr_rmm_off_t old;
    struct rmm_block_t *blk;
    apr_size_t size, oldsize;

    if (!entity) {
        return apr_rmm_malloc(rmm, reqsize);
    }

    size = APR_ALIGN_DEFAULT(reqsize);
    if (size < reqsize) {
        return 0;
    }
    old = apr_rmm_offset_get(rmm, entity);

    if ((this = apr_rmm_malloc(rmm, size)) == 0) {
        return 0;
    }

    blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
    /* blk->size counts the block header too; only the caller-usable
     * bytes should be measured and copied. */
    oldsize = blk->size - RMM_BLOCK_SIZE;

    memcpy(apr_rmm_addr_get(rmm, this),
           apr_rmm_addr_get(rmm, old), oldsize < size ? oldsize : size);
    apr_rmm_free(rmm, old);

    return this;
}

APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
{
    apr_status_t rv;
    struct rmm_block_t *blk;

    /* A little sanity check is always healthy, especially here.
     * If we really cared, we could make this compile-time
     */
    if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
        return APR_EINVAL;
    }

    this -= RMM_BLOCK_SIZE;

    blk = (rmm_block_t*)((char*)rmm->base + this);

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (prev->next != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }
    else {
        if (rmm->base->firstused != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (next->prev != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    /* Ok, it remained [apparently] sane, so unlink it
     */
    move_block(rmm, this, 1);

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
{
    /* debug-level sanity checking here would be good
     */
    return (void*)((char*)rmm->base + entity);
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
{
    /* debug, or even always-on, sanity checking here would be good;
     * since the primitive is apr_rmm_off_t, I don't mind penalizing
     * inverse conversions for safety, unless someone can prove that
     * there is no choice in some cases.
     */
    return ((char*)entity - (char*)rmm->base);
}
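
/* The two conversions are exact inverses; a round-trip sketch
 * (illustrative only, assuming "rmm" is initialized and "off" is a
 * valid offset returned by apr_rmm_malloc):
 *
 *     void *addr = apr_rmm_addr_get(rmm, off);
 *     apr_rmm_off_t back = apr_rmm_offset_get(rmm, addr);
 *     assert(back == off);
 *
 * Because offsets are relative to rmm->base, they remain meaningful
 * in every process attached to the same segment; raw pointers do
 * not. */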

APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
{
    /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
     * for alignment overhead, plus the size of the rmm_block_t
     * structure. */
    return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
}
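
/* A worked sizing example (illustrative; struct sizes and the default
 * alignment are platform-dependent, and the figures below assume
 * 8-byte alignment with 24-byte header and block structures, as on a
 * typical LP64 build): for n = 16 blocks of up to 64 usable bytes
 * each, a sufficient segment size is
 *
 *     apr_rmm_overhead_get(16) + 16 * 64
 *         = (24 + 16 * (24 + 8)) + 1024
 *         = 536 + 1024
 *         = 1560 bytes
 *
 * since each apr_rmm_malloc(rmm, 64) consumes at most
 * APR_ALIGN_DEFAULT(64) + RMM_BLOCK_SIZE = 64 + 24 bytes, which is
 * covered by the 64 payload bytes plus the per-block overhead term. */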