/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <string.h>             /* for memset() */

#include <apr_pools.h>

#include "serf.h"
#include "serf_bucket_util.h"


typedef struct node_header_t {
    apr_size_t size;
    union {
        struct node_header_t *next;      /* if size == 0 (freed/inactive) */
        /* no data                          if size == STANDARD_NODE_SIZE */
        apr_memnode_t *memnode;          /* if size > STANDARD_NODE_SIZE */
    } u;
} node_header_t;

/* The size of a node_header_t, properly aligned. Note that (normally)
 * this macro will round the size to a multiple of 8 bytes. Keep this in
 * mind when altering the node_header_t structure. Also, keep in mind that
 * node_header_t is an overhead for every allocation performed through
 * the serf_bucket_mem_alloc() function.
 */
#define SIZEOF_NODE_HEADER_T  APR_ALIGN_DEFAULT(sizeof(node_header_t))
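
/* For example (a sketch, assuming a typical LP64 build): apr_size_t and
 * both union members are 8 bytes each, so sizeof(node_header_t) is 16,
 * already a multiple of 8, and SIZEOF_NODE_HEADER_T stays 16. Every
 * allocation made through serf_bucket_mem_alloc() therefore pays 16
 * bytes of overhead before the caller's data begins.
 */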


/* STANDARD_NODE_SIZE is manually set to an allocation size that will
 * capture most allocations performed via this API. It must be "large
 * enough" to avoid lots of spillage to allocating directly from the
 * apr_allocator associated with the bucket allocator. The apr_allocator
 * has a minimum size of 8k, which can be expensive if you missed the
 * STANDARD_NODE_SIZE by just a few bytes.
 */
/* ### we should define some rules or ways to determine how to derive
 * ### a "good" value for this. probably log some stats on allocs, then
 * ### analyze them for size "misses". then find the balance point between
 * ### wasted space due to min-size allocator, and wasted-space due to
 * ### size-spill to the 8k minimum.
 */
#define STANDARD_NODE_SIZE 128

/* When allocating a block of memory from the allocator, we should go for
 * an 8k block, minus the overhead that the allocator needs.
 */
#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
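
/* Worked numbers for the two allocation paths (a sketch; the exact
 * figures depend on the platform's APR_MEMNODE_T_SIZE, typically a few
 * dozen bytes on LP64 builds):
 *
 *     serf_bucket_mem_alloc(alloc, 100);  // 100 + 16 = 116 <= 128:
 *                                         // served from a 128-byte node
 *     serf_bucket_mem_alloc(alloc, 120);  // 120 + 16 = 136 > 128:
 *                                         // spills to apr_allocator_alloc(),
 *                                         // which rounds up to its 8k minimum
 *
 * Each ALLOC_AMT block subdivides into roughly
 * (8192 - APR_MEMNODE_T_SIZE) / STANDARD_NODE_SIZE, i.e. about 63,
 * standard nodes.
 */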

/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
 * calls to serf_bucket_mem_free().
 */
#define DEBUG_DOUBLE_FREE


typedef struct {
    const serf_bucket_t *bucket;
    apr_status_t last;
} read_status_t;

#define TRACK_BUCKET_COUNT 100  /* track N buckets' status */

typedef struct {
    int next_index;    /* info[] is a ring. next bucket goes at this idx. */
    int num_used;

    read_status_t info[TRACK_BUCKET_COUNT];
} track_state_t;


struct serf_bucket_alloc_t {
    apr_pool_t *pool;
    apr_allocator_t *allocator;
    int own_allocator;

    serf_unfreed_func_t unfreed;
    void *unfreed_baton;

    apr_uint32_t num_alloc;

    node_header_t *freelist;    /* free STANDARD_NODE_SIZE blocks */
    apr_memnode_t *blocks;      /* blocks we allocated for subdividing */

    track_state_t *track;
};

/* ==================================================================== */


static apr_status_t allocator_cleanup(void *data)
{
    serf_bucket_alloc_t *allocator = data;

    /* If we allocated anything, give it back. */
    if (allocator->blocks) {
        apr_allocator_free(allocator->allocator, allocator->blocks);
    }

    /* If we created our own allocator, destroy it here. */
    if (allocator->own_allocator) {
        apr_allocator_destroy(allocator->allocator);
    }

    return APR_SUCCESS;
}

serf_bucket_alloc_t *serf_bucket_allocator_create(
    apr_pool_t *pool,
    serf_unfreed_func_t unfreed,
    void *unfreed_baton)
{
    serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));

    allocator->pool = pool;
    allocator->allocator = apr_pool_allocator_get(pool);
    if (allocator->allocator == NULL) {
        /* This most likely means pools are running in debug mode; create
         * our own allocator to manage memory ourselves. */
        apr_allocator_create(&allocator->allocator);
        allocator->own_allocator = 1;
    }
    allocator->unfreed = unfreed;
    allocator->unfreed_baton = unfreed_baton;

#ifdef SERF_DEBUG_BUCKET_USE
    {
        track_state_t *track;

        track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
        track->next_index = 0;
        track->num_used = 0;
    }
#endif

    /* NOTE: On a fork/exec, the child won't bother cleaning up memory.
             This is just fine... the memory will go away at exec.

       NOTE: If the child will NOT perform an exec, then the parent or
             the child will need to decide which of them cleans up any
             outstanding connections/buckets (as appropriate).  */
    apr_pool_cleanup_register(pool, allocator,
                              allocator_cleanup, apr_pool_cleanup_null);

    return allocator;
}
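
/* A minimal usage sketch (hypothetical example, not part of this file),
 * assuming the serf_unfreed_func_t callback declared in serf.h takes
 * the baton and the still-allocated block:
 *
 *     static void report_unfreed(void *baton, void *block)
 *     {
 *         // diagnostic hook for blocks never returned to the allocator
 *     }
 *
 *     apr_pool_t *pool;
 *     apr_pool_create(&pool, NULL);
 *
 *     serf_bucket_alloc_t *alloc =
 *         serf_bucket_allocator_create(pool, report_unfreed, NULL);
 *
 *     char *buf = serf_bucket_mem_alloc(alloc, 64);
 *     serf_bucket_mem_free(alloc, buf);
 *
 *     apr_pool_destroy(pool);  // runs allocator_cleanup() registered above
 */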

apr_pool_t *serf_bucket_allocator_get_pool(
    const serf_bucket_alloc_t *allocator)
{
    return allocator->pool;
}


void *serf_bucket_mem_alloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    node_header_t *node;

    ++allocator->num_alloc;

    size += SIZEOF_NODE_HEADER_T;
    if (size <= STANDARD_NODE_SIZE) {
        if (allocator->freelist) {
            /* just pull a node off our freelist */
            node = allocator->freelist;
            allocator->freelist = node->u.next;
#ifdef DEBUG_DOUBLE_FREE
            /* When we free an item, we set its size to zero. Thus, when
             * we return it to the caller, we must ensure the size is set
             * properly.
             */
            node->size = STANDARD_NODE_SIZE;
#endif
        }
        else {
            apr_memnode_t *active = allocator->blocks;

            if (active == NULL
                || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
                apr_memnode_t *head = allocator->blocks;

                /* ran out of room. grab another block. */
                active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);

                /* System couldn't provide us with memory. */
                if (active == NULL)
                    return NULL;

                /* link the block into our tracking list */
                allocator->blocks = active;
                active->next = head;
            }

            node = (node_header_t *)active->first_avail;
            node->size = STANDARD_NODE_SIZE;
            active->first_avail += STANDARD_NODE_SIZE;
        }
    }
    else {
        apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
                                                     size);

        if (memnode == NULL)
            return NULL;

        node = (node_header_t *)memnode->first_avail;
        node->u.memnode = memnode;
        node->size = size;
    }

    return ((char *)node) + SIZEOF_NODE_HEADER_T;
}


void *serf_bucket_mem_calloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    void *mem;
    mem = serf_bucket_mem_alloc(allocator, size);
    if (mem == NULL)
        return NULL;
    memset(mem, 0, size);
    return mem;
}


void serf_bucket_mem_free(
    serf_bucket_alloc_t *allocator,
    void *block)
{
    node_header_t *node;

    --allocator->num_alloc;

    node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);

    if (node->size == STANDARD_NODE_SIZE) {
        /* put the node onto our free list */
        node->u.next = allocator->freelist;
        allocator->freelist = node;

#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
    }
    else if (node->size == 0) {
        /* damn thing was freed already. */
        abort();
#endif
    }
    else {
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
#endif

        /* now free it */
        apr_allocator_free(allocator->allocator, node->u.memnode);
    }
}
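
/* Note on the preprocessor dance above (explanatory sketch only, no
 * behavior change): with DEBUG_DOUBLE_FREE defined the function gains a
 * third arm, and freeing the same standard-size block twice hits it:
 *
 *     char *buf = serf_bucket_mem_alloc(alloc, 32);
 *     serf_bucket_mem_free(alloc, buf);  // pushed on freelist, size = 0
 *     serf_bucket_mem_free(alloc, buf);  // size == 0: abort()
 *
 * Without DEBUG_DOUBLE_FREE the size == 0 arm compiles away, and a
 * double free silently corrupts the freelist instead.
 */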


/* ==================================================================== */


#ifdef SERF_DEBUG_BUCKET_USE

static read_status_t *find_read_status(
    track_state_t *track,
    const serf_bucket_t *bucket,
    int create_rs)
{
    read_status_t *rs;

    if (track->num_used) {
        int count = track->num_used;
        int idx = track->next_index;

        /* Search backwards. In all likelihood, the bucket which just got
         * read was read very recently.
         */
        while (count-- > 0) {
            if (!idx--) {
                /* assert: track->num_used == TRACK_BUCKET_COUNT */
                idx = track->num_used - 1;
            }
            if ((rs = &track->info[idx])->bucket == bucket) {
                return rs;
            }
        }
    }

    /* Only create a new read_status_t when asked. */
    if (!create_rs)
        return NULL;

    if (track->num_used < TRACK_BUCKET_COUNT) {
        /* We're still filling up the ring. */
        ++track->num_used;
    }

    rs = &track->info[track->next_index];
    rs->bucket = bucket;
    rs->last = APR_SUCCESS;     /* ### the right initial value? */

    if (++track->next_index == TRACK_BUCKET_COUNT)
        track->next_index = 0;

    return rs;
}
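
/* A concrete trace of the backward search above (illustrative only):
 * with TRACK_BUCKET_COUNT == 100, a full ring (num_used == 100), and
 * next_index == 3, the probe order over info[] is
 *
 *     idx: 2, 1, 0, 99, 98, ..., 4, 3
 *
 * i.e. most-recently-recorded first, wrapping once at the bottom. The
 * wrap can only occur when the ring is full, hence the assert comment
 * in the loop body.
 */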

#endif /* SERF_DEBUG_BUCKET_USE */


apr_status_t serf_debug__record_read(
    const serf_bucket_t *bucket,
    apr_status_t status)
{
#ifndef SERF_DEBUG_BUCKET_USE
    return status;
#else

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 1);

    /* Validate that the previous status value allowed for another read. */
    if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
        /* Somebody read when they weren't supposed to. Bail. */
        abort();
    }

    /* Save the current status for later. */
    rs->last = status;

    return status;
#endif
}
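
/* A typical call site (hypothetical sketch): a bucket's read function
 * funnels its return value through this hook so the tracker sees every
 * status it hands out.
 *
 *     static apr_status_t my_bucket_read(serf_bucket_t *bucket,
 *                                        apr_size_t requested,
 *                                        const char **data,
 *                                        apr_size_t *len)
 *     {
 *         apr_status_t status = ...;  // perform the actual read
 *
 *         return serf_debug__record_read(bucket, status);
 *     }
 *
 * Reading again after the hook recorded APR_EAGAIN trips the abort()
 * above; without SERF_DEBUG_BUCKET_USE the hook is a plain pass-through.
 */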


void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = allocator->track;
    read_status_t *rs = &track->info[0];

    for ( ; track->num_used; --track->num_used, ++rs ) {
        if (rs->last == APR_SUCCESS) {
            /* Somebody should have read this bucket again. */
            abort();
        }

        /* ### other status values? */
    }

    /* num_used was reset. also need to reset the next index. */
    track->next_index = 0;

#endif
}


void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    /* Just reset the number used so that we don't examine the info[] */
    allocator->track->num_used = 0;
    allocator->track->next_index = 0;

#endif
}


void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 0);

    if (rs != NULL && rs->last != APR_EOF) {
        /* The bucket was destroyed before it was read to completion. */

        /* Special exception for socket buckets. If a connection remains
         * open, they are not read to completion.
         */
        if (SERF_BUCKET_IS_SOCKET(bucket))
            return;

        /* Ditto for SSL Decrypt buckets. */
        if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
            return;

        /* Ditto for SSL Encrypt buckets. */
        if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
            return;

        /* Ditto for barrier buckets. */
        if (SERF_BUCKET_IS_BARRIER(bucket))
            return;

        abort();
    }

#endif
}


void serf_debug__bucket_alloc_check(
    serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    if (allocator->num_alloc != 0) {
        abort();
    }
#endif
}
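
/* A closing sketch of how these hooks might be wired together in a
 * debug build (hypothetical; the loop structure is illustrative):
 *
 *     while (still_running) {
 *         serf_debug__entered_loop(alloc);  // every bucket read in the
 *                                           // prior pass must have been
 *                                           // drained to EAGAIN/EOF
 *         ... run one iteration of the event loop ...
 *     }
 *     serf_debug__bucket_alloc_check(alloc);  // at teardown: every block
 *                                             // handed out was freed
 */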