/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
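
/*
 * A minimal sketch of the matching lwipopts.h settings for the pool-backed
 * configuration described above. The option names are the ones used in
 * this file and its header comment; the values are illustrative only:
 *
 *   #define MEM_LIBC_MALLOC      0  // build this allocator, not libc malloc()
 *   #define MEM_USE_POOLS        1  // back mem_malloc() with fixed-size pools
 *   #define MEM_USE_CUSTOM_POOLS 1  // pool list comes from "lwippools.h"
 */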

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC            /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"

#include <string.h>
#include <assert.h>
#include <barrelfish/barrelfish.h>

#if MEM_USE_POOLS
/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *mem_malloc(mem_size_t size)
{
    struct memp_malloc_helper *element;
    memp_t poolnr;
    mem_size_t required_size = size + sizeof(struct memp_malloc_helper);

    for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr++) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
      again:
#endif                          /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        /* is this pool big enough to hold an element of the required size
           plus a struct memp_malloc_helper that saves the pool this element came from? */
        if (required_size <= memp_sizes[poolnr]) {
            break;
        }
    }
    if (poolnr > MEMP_POOL_LAST) {
        LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
        return NULL;
    }
    element = (struct memp_malloc_helper *) memp_malloc(poolnr);
    if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already
           taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /** Try a bigger pool if this one is empty! */
        if (poolnr < MEMP_POOL_LAST) {
            poolnr++;
            goto again;
        }
#endif                          /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        return NULL;
    }

    /* save the pool number this element came from */
    element->poolnr = poolnr;
    /* and return a pointer to the memory directly after the struct memp_malloc_helper */
    element++;

    return element;
}

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool.
 *
 * @param rmem the memory element to free
 */
void mem_free(void *rmem)
{
    struct memp_malloc_helper *hmem = (struct memp_malloc_helper *) rmem;

    LWIP_ASSERT("rmem != NULL", (rmem != NULL));
    LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

    /* get the original struct memp_malloc_helper */
    hmem--;

    LWIP_ASSERT("hmem != NULL", (hmem != NULL));
    LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
    LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

    /* and put it in the pool we saved earlier */
    memp_free(hmem->poolnr, hmem);
}
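
/*
 * Usage sketch (hypothetical caller): the pool number travels in the
 * struct memp_malloc_helper header placed directly before the pointer
 * returned by mem_malloc(), which is how mem_free() above recovers it:
 *
 *   void *p = mem_malloc(200);   // smallest pool that fits 200 bytes
 *                                // plus the helper header
 *   if (p != NULL) {
 *       // ... use up to 200 bytes at p ...
 *       mem_free(p);             // the header before p names the pool
 *   }
 */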

#else                           /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned, since for getting its size
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
    mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
    mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
    u8_t used;
};

/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent too many small blocks from fragmenting the RAM. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif                          /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
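
/*
 * Layout sketch: 'next' and 'prev' are byte offsets into the aligned heap
 * ram[], so a block's user data size falls out of the offsets alone. A
 * hypothetical helper (not part of this file's API) would be:
 *
 *   static mem_size_t mem_user_size(struct mem *m)
 *   {
 *       mem_size_t ofs = (mem_size_t) ((u8_t *) m - ram);
 *       return m->next - ofs - SIZEOF_STRUCT_MEM;
 *   }
 */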

/** the heap. we need one struct mem at the end and some room for alignment */
//static u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
u8_t *mem_barrelfish_alloc(uint8_t buf_index, uint32_t size);
u8_t *mem_barrelfish_register_buf(uint8_t binding_index, uint32_t size);

static u8_t *ram_heap = 0;

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;

/** the last entry, always unused! */
static struct mem *ram_end;

/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
static sys_sem_t mem_sem;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else                           /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_arch_sem_wait(mem_sem, 0)
#define LWIP_MEM_FREE_UNPROTECT()  sys_sem_signal(mem_sem)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */


/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function has run, no empty struct mem should point to
 * another empty struct mem.
 *
 * @param mem this points to a struct mem which has just been freed
 * @internal this function is only called by mem_free() and mem_realloc()
 *
 * This assumes access to the heap is already protected by the calling
 * function.
 */
static void plug_holes(struct mem *mem)
{
    struct mem *nmem;
    struct mem *pmem;

    LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *) mem >= ram);
    LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *) mem < (u8_t *) ram_end);
    LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

    /* plug hole forward */
    LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED",
                mem->next <= MEM_SIZE_ALIGNED);

    nmem = (struct mem *) &ram[mem->next];
    if (mem != nmem && nmem->used == 0 && (u8_t *) nmem != (u8_t *) ram_end) {
        /* if mem->next is unused and not end of ram, combine mem and mem->next */
        if (lfree == nmem) {
            lfree = mem;
        }
        mem->next = nmem->next;
        ((struct mem *) &ram[nmem->next])->prev = (u8_t *) mem - ram;
    }

    /* plug hole backward */
    pmem = (struct mem *) &ram[mem->prev];
    if (pmem != mem && pmem->used == 0) {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem) {
            lfree = pmem;
        }
        pmem->next = mem->next;
        ((struct mem *) &ram[mem->next])->prev = (u8_t *) pmem - ram;
    }
}

/**
 * Zero the heap and initialize start, end and lowest-free
 */
void mem_init(void)
{
    size_t bufsize = MEM_SIZE_ALIGNED + (2 * SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT;
    struct mem *mem;

//  printf("@@@@@@ mem alloc %lx, %lx for index %d\n", MEM_SIZE_ALIGNED +
//          (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT, bufsize, TX_BUFFER_ID);


    ram_heap = mem_barrelfish_alloc(TX_BUFFER_ID, bufsize);

    LWIP_ASSERT("Sanity check alignment",
                (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);

    /* align the heap */
    ram = LWIP_MEM_ALIGN(ram_heap);
    /* initialize the start of the heap */
    mem = (struct mem *) ram;
    mem->next = MEM_SIZE_ALIGNED;
    mem->prev = 0;
    mem->used = 0;
    /* initialize the end of the heap */
    ram_end = (struct mem *) &ram[MEM_SIZE_ALIGNED];
    ram_end->used = 1;
    ram_end->next = MEM_SIZE_ALIGNED;
    ram_end->prev = MEM_SIZE_ALIGNED;
    mem_sem = sys_sem_new(1);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct mem *) ram;
    MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
    mem_barrelfish_register_buf(TX_BUFFER_ID, bufsize);
}
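
/*
 * After mem_init() the aligned heap looks like this (a sketch; 'next' and
 * 'prev' are byte offsets into ram[]):
 *
 *   ram[0]:                 one free block spanning the whole heap
 *                           next = MEM_SIZE_ALIGNED, prev = 0, used = 0
 *   ram[MEM_SIZE_ALIGNED]:  the ram_end sentinel, always marked used
 *                           next = prev = MEM_SIZE_ALIGNED, used = 1
 *
 * lfree starts at ram[0], so the first mem_malloc() scans from the front.
 */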

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void mem_free(void *rmem)
{
    struct mem *mem;

    LWIP_MEM_FREE_DECL_PROTECT();

    if (rmem == NULL) {
        LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2,
                    ("mem_free(p == NULL) was called.\n"));
        return;
    }
    LWIP_ASSERT("mem_free: sanity check alignment",
                (((mem_ptr_t) rmem) & (MEM_ALIGNMENT - 1)) == 0);

    if ((u8_t *) rmem >= (u8_t *) ram &&
                (u8_t *) rmem < (u8_t *) ram_end) {
        /* everything is fine */
    } else {
        printf("assertion failed for rmem %p\n", rmem);
        printf("condition was ram %p <= rmem %p < ram_end %p\n",
                ram, rmem, ram_end);
    }
    LWIP_ASSERT("mem_free: legal memory", (u8_t *) rmem >= (u8_t *) ram &&
                (u8_t *) rmem < (u8_t *) ram_end);

    if ((u8_t *) rmem < (u8_t *) ram || (u8_t *) rmem >= (u8_t *) ram_end) {
        SYS_ARCH_DECL_PROTECT(lev);
        LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
        /* protect mem stats from concurrent access */
        SYS_ARCH_PROTECT(lev);
        MEM_STATS_INC(illegal);
        SYS_ARCH_UNPROTECT(lev);
        return;
    }
    /* protect the heap from concurrent access */
    LWIP_MEM_FREE_PROTECT();
    /* Get the corresponding struct mem ... */
    mem = (struct mem *) ((u8_t *) rmem - SIZEOF_STRUCT_MEM);
    /* ... which has to be in a used state ... */
    LWIP_ASSERT("mem_free: mem->used", mem->used);
    /* ... and is now unused. */
    mem->used = 0;

    if (mem < lfree) {
        /* the newly freed struct is now the lowest */
        lfree = mem;
    }

    MEM_STATS_DEC_USED(used, mem->next - ((u8_t *) mem - ram));

    /* finally, see if prev or next are free also */
    plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    mem_free_count = 1;
#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
    LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Contrary to its name, mem_realloc can only shrink memory, not expand it.
 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
 * this shouldn't be a problem!
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *mem_realloc(void *rmem, mem_size_t newsize)
{
    mem_size_t size;
    mem_size_t ptr, ptr2;
    struct mem *mem, *mem2;

    /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
    LWIP_MEM_FREE_DECL_PROTECT();

    /* Expand the size of the allocated memory region so that we can
       adjust for alignment. */
    newsize = LWIP_MEM_ALIGN_SIZE(newsize);

    if (newsize < MIN_SIZE_ALIGNED) {
        /* every data block must be at least MIN_SIZE_ALIGNED long */
        newsize = MIN_SIZE_ALIGNED;
    }

    if (newsize > MEM_SIZE_ALIGNED) {
        return NULL;
    }

    LWIP_ASSERT("mem_realloc: legal memory", (u8_t *) rmem >= (u8_t *) ram &&
                (u8_t *) rmem < (u8_t *) ram_end);

    if ((u8_t *) rmem < (u8_t *) ram || (u8_t *) rmem >= (u8_t *) ram_end) {
        SYS_ARCH_DECL_PROTECT(lev);
        LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
        /* protect mem stats from concurrent access */
        SYS_ARCH_PROTECT(lev);
        MEM_STATS_INC(illegal);
        SYS_ARCH_UNPROTECT(lev);
        return rmem;
    }
    /* Get the corresponding struct mem ... */
    mem = (struct mem *) ((u8_t *) rmem - SIZEOF_STRUCT_MEM);
    /* ... and its offset pointer */
    ptr = (u8_t *) mem - ram;

    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
    if (newsize > size) {
        /* not supported */
        return NULL;
    }
    if (newsize == size) {
        /* No change in size, simply return */
        return rmem;
    }

    /* protect the heap from concurrent access */
    LWIP_MEM_FREE_PROTECT();

    MEM_STATS_DEC_USED(used, (size - newsize));

    mem2 = (struct mem *) &ram[mem->next];
    if (mem2->used == 0) {
        /* The next struct is unused, we can simply move it a little */
        mem_size_t next;

        /* remember the old next pointer */
        next = mem2->next;
        /* create new struct mem which is moved directly after the shrunken mem */
        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        if (lfree == mem2) {
            lfree = (struct mem *) &ram[ptr2];
        }
        mem2 = (struct mem *) &ram[ptr2];
        mem2->used = 0;
        /* restore the next pointer */
        mem2->next = next;
        /* link it back to mem */
        mem2->prev = ptr;
        /* link mem to it */
        mem->next = ptr2;
        /* last thing to restore linked list: as we have moved mem2,
         * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
         * the end of the heap */
        if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *) &ram[mem2->next])->prev = ptr2;
        }
        /* no need to plug holes, we've already done that */
    } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
        /* Next struct is used but there's room for another struct mem with
         * at least MIN_SIZE_ALIGNED of data.
         * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
         * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
         * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
         *       region that couldn't hold data, but when mem->next gets freed,
         *       the 2 regions would be combined, resulting in more free memory */
        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct mem *) &ram[ptr2];
        if (mem2 < lfree) {
            lfree = mem2;
        }
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
        mem->next = ptr2;
        if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *) &ram[mem2->next])->prev = ptr2;
        }
        /* the original mem->next is used, so no need to plug holes! */
    }
    /* else {
       next struct mem is used but size between mem and mem2 is not big enough
       to create another struct mem
       -> don't do anything.
       -> the remaining space stays unused since it is too small
       } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    mem_free_count = 1;
#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
    LWIP_MEM_FREE_UNPROTECT();
    return rmem;
}
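
/*
 * Caller sketch (hypothetical): since mem_realloc() only shrinks, a grow
 * request returns NULL with rmem untouched, and the caller has to fall
 * back to malloc/copy/free itself:
 *
 *   void *q = mem_realloc(p, newsize);      // fine while newsize <= oldsize
 *   if (q == NULL) {                        // grow attempt: p is untouched
 *       q = mem_malloc(newsize);
 *       if (q != NULL) {
 *           memcpy(q, p, oldsize);          // caller must track oldsize
 *           mem_free(p);
 *       }
 *   }
 */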

/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *mem_malloc(mem_size_t size)
{
    mem_size_t ptr, ptr2;
    struct mem *mem, *mem2;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    u8_t local_mem_free_count = 0;
#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
    LWIP_MEM_ALLOC_DECL_PROTECT();

    if (size == 0) {
        return NULL;
    }

    /* Expand the size of the allocated memory region so that we can
       adjust for alignment. */
    size = LWIP_MEM_ALIGN_SIZE(size);

    if (size < MIN_SIZE_ALIGNED) {
        /* every data block must be at least MIN_SIZE_ALIGNED long */
        size = MIN_SIZE_ALIGNED;
    }

    if (size > MEM_SIZE_ALIGNED) {
        return NULL;
    }

    /* protect the heap from concurrent access */
    sys_arch_sem_wait(mem_sem, 0);
    LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* run as long as a mem_free disturbed mem_malloc */
    do {
        local_mem_free_count = 0;
#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

        /* Scan through the heap searching for a free block that is big enough,
         * beginning with the lowest free block.
         */
        for (ptr = (u8_t *) lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
             ptr = ((struct mem *) &ram[ptr])->next) {
            mem = (struct mem *) &ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* allow mem_free to run */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
                local_mem_free_count = mem_free_count;
            }
            mem_free_count = 0;
#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

            if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
                /* mem is not used and at least perfect fit is possible:
                 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

                if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                    (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
                    /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
                     * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                     * -> split large block, create empty remainder,
                     * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                     * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                     * struct mem would fit in but no data between mem2 and mem2->next
                     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                     *       region that couldn't hold data, but when mem->next gets freed,
                     *       the 2 regions would be combined, resulting in more free memory
                     */
                    ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
                    /* create mem2 struct */
                    mem2 = (struct mem *) &ram[ptr2];
                    mem2->used = 0;
                    mem2->next = mem->next;
                    mem2->prev = ptr;
                    /* and insert it between mem and mem->next */
                    mem->next = ptr2;
                    mem->used = 1;

                    if (mem2->next != MEM_SIZE_ALIGNED) {
                        ((struct mem *) &ram[mem2->next])->prev = ptr2;
                    }
                    MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
                } else {
                    /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
                     * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
                     * should have taken care of this).
                     * -> near fit or exact fit: do not split, no mem2 creation,
                     * also can't move mem->next directly behind mem, since mem->next
                     * will always be used at this point!
                     */
                    mem->used = 1;
                    MEM_STATS_INC_USED(used, mem->next - ((u8_t *) mem - ram));
                }

                if (mem == lfree) {
                    /* Find next free block after mem and update lowest free pointer */
                    while (lfree->used && lfree != ram_end) {
                        LWIP_MEM_ALLOC_UNPROTECT();
                        /* prevent high interrupt latency... */
                        LWIP_MEM_ALLOC_PROTECT();
                        lfree = (struct mem *) &ram[lfree->next];
                    }
                    LWIP_ASSERT("mem_malloc: !lfree->used",
                                ((lfree == ram_end) || (!lfree->used)));
                }
                LWIP_MEM_ALLOC_UNPROTECT();
                sys_sem_signal(mem_sem);
                LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                            (mem_ptr_t) mem + SIZEOF_STRUCT_MEM + size <=
                            (mem_ptr_t) ram_end);
                LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                            ((mem_ptr_t) mem +
                             SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
                LWIP_ASSERT("mem_malloc: sanity check alignment",
                            (((mem_ptr_t) mem) & (MEM_ALIGNMENT - 1)) == 0);

                return (u8_t *) mem + SIZEOF_STRUCT_MEM;
            }
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
        /* if we got interrupted by a mem_free, try again */
    } while (local_mem_free_count != 0);
#endif                          /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
    LWIP_DEBUGF(MEM_DEBUG | 2,
                ("mem_malloc: could not allocate %" S16_F " bytes\n",
                 (s16_t) size));
    MEM_STATS_INC(err);
    LWIP_MEM_ALLOC_UNPROTECT();
    sys_sem_signal(mem_sem);
    return NULL;
}
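
/*
 * Split sketch: when a free block has room for the request plus another
 * header and MIN_SIZE_ALIGNED bytes of data, the loop above turns
 *
 *   [mem: free, large user space]
 *
 * into
 *
 *   [mem: used, 'size' bytes][mem2: free, remainder]
 *
 * by writing mem->next = ptr2 and mem2->prev = ptr; otherwise the whole
 * block is handed out unsplit (a near or exact fit).
 */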

#endif                          /* MEM_USE_POOLS */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *mem_calloc(mem_size_t count, mem_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = mem_malloc(count * size);
    if (p) {
        /* zero the memory */
        memset(p, 0, count * size);
    }
    return p;
}
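
/*
 * Note that 'count * size' above is computed in mem_size_t and is not
 * checked for overflow. A defensive wrapper (a hypothetical helper, not
 * part of this file) could reject wrapping requests first:
 *
 *   void *calloc_checked(mem_size_t count, mem_size_t size)
 *   {
 *       if (size != 0 && count > ((mem_size_t) -1) / size) {
 *           return NULL;    // count * size would wrap around
 *       }
 *       return mem_calloc(count, size);
 *   }
 */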

#endif                          /* !MEM_LIBC_MALLOC */
