/* ====================================================================
 *    Licensed to the Apache Software Foundation (ASF) under one
 *    or more contributor license agreements.  See the NOTICE file
 *    distributed with this work for additional information
 *    regarding copyright ownership.  The ASF licenses this file
 *    to you under the Apache License, Version 2.0 (the
 *    "License"); you may not use this file except in compliance
 *    with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing,
 *    software distributed under the License is distributed on an
 *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *    KIND, either express or implied.  See the License for the
 *    specific language governing permissions and limitations
 *    under the License.
 * ====================================================================
 */

#include <stdlib.h>
#include <string.h>     /* for memset() in serf_bucket_mem_calloc() */

#include <apr_pools.h>

#include "serf.h"
#include "serf_bucket_util.h"


typedef struct node_header_t {
    apr_size_t size;
    union {
        struct node_header_t *next;      /* if size == 0 (freed/inactive) */
        /* no data                          if size == STANDARD_NODE_SIZE */
        apr_memnode_t *memnode;          /* if size > STANDARD_NODE_SIZE */
    } u;
} node_header_t;

/* The size of a node_header_t, properly aligned. Note that (normally)
 * this macro will round the size up to a multiple of 8 bytes. Keep this
 * in mind when altering the node_header_t structure. Also, keep in mind
 * that node_header_t adds overhead to every allocation performed through
 * the serf_bucket_mem_alloc() function.
 */
#define SIZEOF_NODE_HEADER_T  APR_ALIGN_DEFAULT(sizeof(node_header_t))
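
/* For example, on a typical LP64 build sizeof(node_header_t) is 16
 * (an 8-byte apr_size_t followed by an 8-byte pointer union), which is
 * already 8-byte aligned, so SIZEOF_NODE_HEADER_T would also be 16.
 * The exact value is platform-dependent.
 */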


/* STANDARD_NODE_SIZE is manually set to an allocation size that will
 * capture most allocations performed via this API. It must be "large
 * enough" to avoid lots of spillage to allocating directly from the
 * apr_allocator associated with the bucket allocator. The apr_allocator
 * has a minimum size of 8k, which can be expensive if you missed the
 * STANDARD_NODE_SIZE by just a few bytes.
 */
/* ### we should define some rules or ways to determine how to derive
 * ### a "good" value for this. probably log some stats on allocs, then
 * ### analyze them for size "misses". then find the balance point between
 * ### wasted space due to min-size allocator, and wasted space due to
 * ### size-spill to the 8k minimum.
 */
#define STANDARD_NODE_SIZE 128
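
/* Given the (assumed) 16-byte aligned header size from above, a caller
 * request of up to 128 - 16 = 112 bytes would be served from the
 * freelist; anything larger falls through to the apr_allocator, whose
 * 8k minimum makes a near-miss comparatively expensive.
 */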

/* When allocating a block of memory from the allocator, we should go for
 * an 8k block, minus the overhead that the allocator needs.
 */
#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)

/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
 * calls to serf_bucket_mem_free().
 */
#define DEBUG_DOUBLE_FREE


typedef struct {
    const serf_bucket_t *bucket;
    apr_status_t last;          /* last status returned by a read */
} read_status_t;

#define TRACK_BUCKET_COUNT 100  /* track N buckets' status */

typedef struct {
    int next_index;    /* info[] is a ring. next bucket goes at this idx. */
    int num_used;

    read_status_t info[TRACK_BUCKET_COUNT];
} track_state_t;


struct serf_bucket_alloc_t {
    apr_pool_t *pool;
    apr_allocator_t *allocator;
    int own_allocator;          /* did we create the allocator ourselves? */

    serf_unfreed_func_t unfreed;
    void *unfreed_baton;

    apr_uint32_t num_alloc;     /* number of outstanding allocations */

    node_header_t *freelist;    /* free STANDARD_NODE_SIZE blocks */
    apr_memnode_t *blocks;      /* blocks we allocated for subdividing */

    track_state_t *track;
};

/* ==================================================================== */


static apr_status_t allocator_cleanup(void *data)
{
    serf_bucket_alloc_t *allocator = data;

    /* If we allocated anything, give it back. */
    if (allocator->blocks) {
        apr_allocator_free(allocator->allocator, allocator->blocks);
    }

    /* If we created our own allocator, destroy it here. */
    if (allocator->own_allocator) {
        apr_allocator_destroy(allocator->allocator);
    }

    return APR_SUCCESS;
}

serf_bucket_alloc_t *serf_bucket_allocator_create(
    apr_pool_t *pool,
    serf_unfreed_func_t unfreed,
    void *unfreed_baton)
{
    serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));

    allocator->pool = pool;
    allocator->allocator = apr_pool_allocator_get(pool);
    if (allocator->allocator == NULL) {
        /* This most likely means the pools are running in debug mode;
         * create our own allocator so we can manage this memory ourselves.
         */
        apr_allocator_create(&allocator->allocator);
        allocator->own_allocator = 1;
    }
    allocator->unfreed = unfreed;
    allocator->unfreed_baton = unfreed_baton;

#ifdef SERF_DEBUG_BUCKET_USE
    {
        track_state_t *track;

        track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
        track->next_index = 0;
        track->num_used = 0;
    }
#endif

    /* NOTE: On a fork/exec, the child won't bother cleaning up memory.
             This is just fine... the memory will go away at exec.

       NOTE: If the child will NOT perform an exec, then the parent or
             the child will need to decide who cleans up any outstanding
             connections/buckets (as appropriate).  */
    apr_pool_cleanup_register(pool, allocator,
                              allocator_cleanup, apr_pool_cleanup_null);

    return allocator;
}
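
/* Typical usage (a sketch; the "report_unfreed" callback name below is
 * illustrative, not part of this API):
 *
 *     static void report_unfreed(void *baton, void *block)
 *     {
 *         // log the leaked block, e.g. to the FILE * passed as baton
 *     }
 *
 *     serf_bucket_alloc_t *alloc =
 *         serf_bucket_allocator_create(pool, report_unfreed, stderr);
 *
 * Passing NULL for the unfreed callback is also acceptable when leak
 * reporting is not needed.
 */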

apr_pool_t *serf_bucket_allocator_get_pool(
    const serf_bucket_alloc_t *allocator)
{
    return allocator->pool;
}


void *serf_bucket_mem_alloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    node_header_t *node;

    ++allocator->num_alloc;

    size += SIZEOF_NODE_HEADER_T;
    if (size <= STANDARD_NODE_SIZE) {
        if (allocator->freelist) {
            /* just pull a node off our freelist */
            node = allocator->freelist;
            allocator->freelist = node->u.next;
#ifdef DEBUG_DOUBLE_FREE
            /* When we free an item, we set its size to zero. Thus, when
             * we return it to the caller, we must ensure the size is set
             * properly.
             */
            node->size = STANDARD_NODE_SIZE;
#endif
        }
        else {
            apr_memnode_t *active = allocator->blocks;

            if (active == NULL
                || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
                apr_memnode_t *head = allocator->blocks;

                /* ran out of room. grab another block. */
                active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);

                /* System couldn't provide us with memory. */
                if (active == NULL)
                    return NULL;

                /* link the block into our tracking list */
                allocator->blocks = active;
                active->next = head;
            }

            node = (node_header_t *)active->first_avail;
            node->size = STANDARD_NODE_SIZE;
            active->first_avail += STANDARD_NODE_SIZE;
        }
    }
    else {
        apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
                                                     size);

        if (memnode == NULL)
            return NULL;

        node = (node_header_t *)memnode->first_avail;
        node->u.memnode = memnode;
        node->size = size;
    }

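    /* Hand back the usable region just past the node header;
     * serf_bucket_mem_free() will step backwards over the header
     * to recover the node.
     */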
    return ((char *)node) + SIZEOF_NODE_HEADER_T;
}


void *serf_bucket_mem_calloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    void *mem;
    mem = serf_bucket_mem_alloc(allocator, size);
    if (mem == NULL)
        return NULL;
    memset(mem, 0, size);
    return mem;
}


void serf_bucket_mem_free(
    serf_bucket_alloc_t *allocator,
    void *block)
{
    node_header_t *node;

    --allocator->num_alloc;

    node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);

    if (node->size == STANDARD_NODE_SIZE) {
        /* put the node onto our free list */
        node->u.next = allocator->freelist;
        allocator->freelist = node;

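        /* Note: the #ifdef below deliberately spans the closing brace
         * and the "else if" branch, so marking freed nodes with size 0
         * and catching double frees both exist only in debug builds.
         */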
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
    }
    else if (node->size == 0) {
        /* this node was already freed: a double free. */
        abort();
#endif
    }
    else {
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
#endif

        /* now free it */
        apr_allocator_free(allocator->allocator, node->u.memnode);
    }
}
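
/* Allocation and free calls must be paired against the same allocator;
 * a sketch of the expected usage:
 *
 *     char *buf = serf_bucket_mem_alloc(alloc, 64);
 *     if (buf != NULL) {
 *         // ... use buf ...
 *         serf_bucket_mem_free(alloc, buf);
 *     }
 *
 * Freeing a block through a different serf_bucket_alloc_t than the one
 * that allocated it can corrupt the freelists.
 */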


/* ==================================================================== */


#ifdef SERF_DEBUG_BUCKET_USE

static read_status_t *find_read_status(
    track_state_t *track,
    const serf_bucket_t *bucket,
    int create_rs)
{
    read_status_t *rs;

    if (track->num_used) {
        int count = track->num_used;
        int idx = track->next_index;

        /* Search backwards. In all likelihood, the bucket which just got
         * read was read very recently.
         */
        while (count-- > 0) {
            if (!idx--) {
                /* assert: track->num_used == TRACK_BUCKET_COUNT */
                idx = track->num_used - 1;
            }
            if ((rs = &track->info[idx])->bucket == bucket) {
                return rs;
            }
        }
    }

    /* Only create a new read_status_t when asked. */
    if (!create_rs)
        return NULL;

    if (track->num_used < TRACK_BUCKET_COUNT) {
        /* We're still filling up the ring. */
        ++track->num_used;
    }

    rs = &track->info[track->next_index];
    rs->bucket = bucket;
    rs->last = APR_SUCCESS;     /* ### the right initial value? */

    if (++track->next_index == TRACK_BUCKET_COUNT)
        track->next_index = 0;

    return rs;
}

#endif /* SERF_DEBUG_BUCKET_USE */


apr_status_t serf_debug__record_read(
    const serf_bucket_t *bucket,
    apr_status_t status)
{
#ifndef SERF_DEBUG_BUCKET_USE
    return status;
#else

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 1);

    /* Validate that the previous status value allowed for another read. */
    if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
        /* Somebody read when they weren't supposed to. Bail. */
        abort();
    }

    /* Save the current status for later. */
    rs->last = status;

    return status;
#endif
}
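
/* The intent is for bucket read implementations to funnel their return
 * value through this function, e.g. (a sketch):
 *
 *     return serf_debug__record_read(bucket, status);
 *
 * so that each bucket's most recent read status is tracked.
 */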
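/* Verify, on re-entry to the event loop, that every bucket read since
 * the last iteration ended with a status other than APR_SUCCESS; a
 * bucket left at APR_SUCCESS still had data available that somebody
 * should have continued reading.
 */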
void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = allocator->track;
    read_status_t *rs = &track->info[0];

    for ( ; track->num_used; --track->num_used, ++rs ) {
        if (rs->last == APR_SUCCESS) {
            /* Somebody should have read this bucket again. */
            abort();
        }

        /* ### other status values? */
    }

    /* num_used was reset. also need to reset the next index. */
    track->next_index = 0;

#endif
}


void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    /* Just reset the number used so that we don't examine the info[] */
    allocator->track->num_used = 0;
    allocator->track->next_index = 0;

#endif
}


void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 0);

    if (rs != NULL && rs->last != APR_EOF) {
        /* The bucket was destroyed before it was read to completion. */

        /* Special exception for socket buckets. If a connection remains
         * open, they are not read to completion.
         */
        if (SERF_BUCKET_IS_SOCKET(bucket))
            return;

        /* Ditto for SSL Decrypt buckets. */
        if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
            return;

        /* Ditto for SSL Encrypt buckets. */
        if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
            return;

        /* Ditto for barrier buckets. */
        if (SERF_BUCKET_IS_BARRIER(bucket))
            return;

        abort();
    }

#endif
}


void serf_debug__bucket_alloc_check(
    serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    if (allocator->num_alloc != 0) {
        abort();
    }
#endif
}