// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2015 Google, Inc. All rights reserved
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <lib/cmpctmalloc.h>

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <debug.h>
#include <err.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <vm/vm.h>
#include <lib/heap.h>
#include <platform.h>
#include <trace.h>

// Malloc implementation tuned for space.
//
// All allocation and freeing is serialized by a global mutex.  Freelist
// entries are kept in linked lists with 8 different sizes per binary order of
// magnitude, the header size is two words, and free areas are eagerly
// coalesced on free.
//
// ## Concepts ##
//
// OS allocation:
//   A contiguous range of pages allocated from the OS using heap_page_alloc(),
//   typically via heap_grow(). Initial layout:
//
//   Low addr =>
//     header_t left_sentinel -- Marked as allocated, |left| pointer NULL.
//     free_t memory_area -- Marked as free, with appropriate size,
//                           and pointed to by a free bucket.
//     [bulk of usable memory]
//     header_t right_sentinel -- Marked as allocated, size zero
//   <= High addr
//
//   The free memory area is added to the appropriate free bucket and picked
//   up later in the cmpct_alloc() logic. (Large allocations used to bypass
//   the free buckets and be handed back directly, but they are no longer
//   supported; see below.)
//
//   cmpctmalloc does not keep a list of OS allocations; each is meant to free
//   itself to the OS when all of its memory areas become free.
//
// Memory area:
//   A sub-range of an OS allocation. Used to satisfy
//   cmpct_alloc()/cmpct_memalign() calls. Can be free and live in a free
//   bucket, or can be allocated and managed by the user.
//
//   Memory areas, both free and allocated, always begin with a header_t,
//   followed by the area's usable memory. header_t.size includes the size of
//   the header. untag(header_t.left) points to the preceding area's header_t.
//
//   The low bits of header_t.left hold additional flags about the area:
//   - FREE_BIT: The area is free, and lives in a free bucket.
//   These bits shouldn't be checked directly; use the is_tagged_as_*()
//   functions.
//
//   If the area is free (is_tagged_as_free(header_t*)), the area's header
//   includes the doubly-linked free list pointers defined by free_t (which is a
//   header_t overlay). Those pointers are used to chain the free area off of
//   the appropriately-sized free bucket.
//
// Normal (small/non-large) allocation:
//   An allocation of less than HEAP_LARGE_ALLOC_BYTES, which can fit in a free
//   bucket.
//
// Large allocation:
//   An allocation of more than HEAP_LARGE_ALLOC_BYTES. This is no longer allowed.
//
// Free buckets:
//   Freelist entries are kept in linked lists with 8 different sizes per binary
//   order of magnitude: heap.free_lists[NUMBER_OF_BUCKETS]
//
//   Allocations are always rounded up to the nearest bucket size. This would
//   appear to waste memory, but in fact it avoids some fragmentation.
//
//   Consider two buckets with size 512 and 576 (512 + 64). Perhaps the program
//   often allocates 528 byte objects for some reason. When we need to allocate
//   528 bytes, we round that up to 576 bytes. When it is freed, it goes in the
//   576 byte bucket, where it is available for the next of the common 528 byte
//   allocations.
//
//   If we did not round up allocations, then (assuming no coalescing is
//   possible) we would have to place the freed 528 bytes in the 512 byte
//   bucket, since only memory areas greater than or equal to 576 bytes can go
//   in the 576 byte bucket. The next time we need to allocate a 528 byte object
//   we do not look in the 512 byte bucket, because we want to be sure the first
//   memory area we look at is big enough, to avoid searching a long chain of
//   just-too-small memory areas on the free list. We would not find the 528
//   byte space and would have to carve out a new 528 byte area from a large
//   free memory area, making fragmentation worse.
//
// cmpct_free() behavior:
//   Freed memory areas are eagerly coalesced with free left/right neighbors. If
//   the new free area covers an entire OS allocation (i.e., its left and right
//   neighbors are both sentinels), the OS allocation is returned to the OS.
//
//   Exception: to avoid OS free/alloc churn when right on the edge, the heap
//   will try to hold onto one entirely-free, non-large OS allocation instead of
//   returning it to the OS. See cached_os_alloc.

#if defined(DEBUG) || LK_DEBUGLEVEL > 2
#define CMPCT_DEBUG
#endif

#define LOCAL_TRACE 0

// Use HEAP_ENABLE_TESTS to enable internal testing. The tests are not useful
// once the target system is up, because by that time hundreds of allocations
// have already been made.

#define ALLOC_FILL 0x99
#define FREE_FILL 0x77
#define PADDING_FILL 0x55
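
// How the fill patterns are used when CMPCT_DEBUG is defined (a summary of
// the logic below): create_free_area() paints a freed area with FREE_FILL,
// check_free_fill() verifies that the pattern is still intact when the area
// is handed out again, and cmpct_alloc() then paints the payload with
// ALLOC_FILL and the round-up slack with PADDING_FILL.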

#if !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (1 * 1024 * 1024) /* Grow aggressively */
#endif

static_assert(IS_PAGE_ALIGNED(HEAP_GROW_SIZE), "");

#define HEAP_ALLOC_VIRTUAL_BITS 22
#define HEAP_LARGE_ALLOC_BYTES (1u << HEAP_ALLOC_VIRTUAL_BITS)

// When we grow the heap we have to have somewhere in the freelist to put the
// resulting freelist entry, so the freelist has to have a certain number of
// buckets.
static_assert(HEAP_GROW_SIZE <= HEAP_LARGE_ALLOC_BYTES, "");

// Buckets for allocations.  The smallest 15 buckets are 8, 16, 24, etc. up to
// 120 bytes.  After that we round up to the nearest size that can be written
// /^0*1...0*$/, giving 8 buckets per order of binary magnitude.  The freelist
// entries in a given bucket have at least the given size, plus the header
// size.  On 64 bit, the 8 byte bucket is useless, since the free-list entry
// (free_t) is 16 bytes larger than the plain header_t, but we keep it for
// simplicity.
#define NUMBER_OF_BUCKETS (1 + 15 + (HEAP_ALLOC_VIRTUAL_BITS - 7) * 8)
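// With HEAP_ALLOC_VIRTUAL_BITS == 22 this evaluates to 1 + 15 + 120 == 136
// buckets: indices 0-14 cover the 8-byte-spaced sizes 8..120, and index 15
// upward gives 8 buckets per binary order of magnitude up to the 2^22-byte
// (HEAP_LARGE_ALLOC_BYTES) ceiling.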

// If a header's |left| field has this bit set, it is free and lives in
// a free bucket.
#define FREE_BIT (1 << 0)

#define HEADER_LEFT_BIT_MASK (FREE_BIT)

// All individual memory areas on the heap start with this.
typedef struct header_struct {
    // Pointer to the previous area in memory order. The lower bit is used
    // to store extra state: see FREE_BIT. The left sentinel will have
    // NULL in the address portion of this field. Left and right sentinels
    // will always be marked as "allocated" to avoid coalescing.
    struct header_struct* left;
    // The size of the memory area in bytes, including this header.
    // The right sentinel will have 0 in this field.
    size_t size;
} header_t;

typedef struct free_struct {
    header_t header;
    struct free_struct* next;
    struct free_struct* prev;
} free_t;

struct heap {
    // Total bytes allocated from the OS for the heap.
    size_t size;

    // Bytes of usable free space in the heap.
    size_t remaining;

    // A non-large OS allocation that could have been freed to the OS but
    // wasn't. We will attempt to use this before allocating more memory from
    // the OS, to reduce churn. May be null. If non-null, cached_os_alloc->size
    // holds the total size allocated from the OS for this block.
    header_t* cached_os_alloc;

    // Guards all elements in this structure. See lock(), unlock().
    mutex_t lock;

    // Free lists, bucketed by size. See size_to_index_helper().
    free_t* free_lists[NUMBER_OF_BUCKETS];

    // Bitmask that tracks whether a given free_lists entry has any elements.
    // See set_free_list_bit(), clear_free_list_bit().
#define BUCKET_WORDS (((NUMBER_OF_BUCKETS) + 31) >> 5)
    uint32_t free_list_bits[BUCKET_WORDS];
};

// Heap static vars.
static struct heap theheap;

static ssize_t heap_grow(size_t len);

static void lock(void) TA_ACQ(theheap.lock) {
    mutex_acquire(&theheap.lock);
}

static void unlock(void) TA_REL(theheap.lock) {
    mutex_release(&theheap.lock);
}

static void dump_free(header_t* header) {
    dprintf(INFO, "\t\tbase %p, end %#" PRIxPTR ", len %#zx (%zu)\n",
            header, (vaddr_t)header + header->size, header->size, header->size);
}

void cmpct_dump(bool panic_time) TA_NO_THREAD_SAFETY_ANALYSIS {
    if (!panic_time) {
        lock();
    }

    dprintf(INFO, "Heap dump (using cmpctmalloc):\n");
    dprintf(INFO, "\tsize %lu, remaining %lu, cached free %lu\n",
            (unsigned long)theheap.size,
            (unsigned long)theheap.remaining,
            theheap.cached_os_alloc ? theheap.cached_os_alloc->size : 0);

    dprintf(INFO, "\tfree list:\n");
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        bool header_printed = false;
        free_t* free_area = theheap.free_lists[i];
        for (; free_area != NULL; free_area = free_area->next) {
            ASSERT(free_area != free_area->next);
            if (!header_printed) {
                dprintf(INFO, "\tbucket %d\n", i);
                header_printed = true;
            }
            dump_free(&free_area->header);
        }
    }

    if (!panic_time) {
        unlock();
    }
}

void cmpct_get_info(size_t* size_bytes, size_t* free_bytes) {
    lock();
    *size_bytes = theheap.size;
    *free_bytes = theheap.remaining;
    unlock();
}

// Operates in sizes that don't include the allocation header;
// i.e., the usable portion of a memory area.
static int size_to_index_helper(
    size_t size, size_t* rounded_up_out, int adjust, int increment) {
    // First buckets are simply 8-spaced up to 128.
    if (size <= 128) {
        if (sizeof(size_t) == 8u && size <= sizeof(free_t) - sizeof(header_t)) {
            *rounded_up_out = sizeof(free_t) - sizeof(header_t);
        } else {
            *rounded_up_out = size;
        }
        // No allocation is smaller than 8 bytes, so the first bucket is for 8
        // byte spaces (not including the header).  For 64 bit, the free list
        // struct is 16 bytes larger than the header, so no allocation can be
        // smaller than that (otherwise how to free it), but we have empty 8
        // and 16 byte buckets for simplicity.
        return (size >> 3) - 1;
    }

    // We are going to go up to the next size to round up, but if we hit a
    // bucket size exactly we don't want to go up. By subtracting 8 here, we
    // will do the right thing (the carry propagates up for the round numbers
    // we are interested in).
    size += adjust;
    // After 128 the buckets are logarithmically spaced, every 16 up to 256,
    // every 32 up to 512 etc.  This can be thought of as rows of 8 buckets.
    // GCC intrinsic count-leading-zeros.
    // Eg. with a 32 bit size_t, 128-255 has 24 leading zeros and we want row
    // to be 4.
    unsigned row = sizeof(size_t) * 8 - 4 - __builtin_clzl(size);
    // For row 4 we want to shift down 4 bits.
    unsigned column = (size >> row) & 7;
    int row_column = (row << 3) | column;
    row_column += increment;
    size = (8 + (row_column & 7)) << (row_column >> 3);
    *rounded_up_out = size;
    // We start with 15 buckets, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,
    // 104, 112, 120.  Then we have row 4, sizes 128 and up, with the
    // row-column 8 and up.
    int answer = row_column + 15 - 32;
    DEBUG_ASSERT(answer < NUMBER_OF_BUCKETS);
    return answer;
}

// Round up size to next bucket when allocating.
static int size_to_index_allocating(size_t size, size_t* rounded_up_out) {
    size_t rounded = ROUNDUP(size, 8);
    return size_to_index_helper(rounded, rounded_up_out, -8, 1);
}

// Round down size to next bucket when freeing.
static int size_to_index_freeing(size_t size) {
    size_t dummy;
    return size_to_index_helper(size, &dummy, 0, 0);
}
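
// Worked example (illustrative only, assuming a 64-bit size_t): rounding up
// the 528-byte request from the header comment.
//
//   size_to_index_allocating(528, &rounded):
//     rounded = ROUNDUP(528, 8) = 528
//     helper(528, ..., adjust = -8, increment = 1):
//       size = 520; row = 64 - 4 - clzl(520) = 6; column = (520 >> 6) & 7 = 0
//       row_column = ((6 << 3) | 0) + 1 = 49
//       rounded_up = (8 + 1) << 6 = 576; bucket index = 49 + 15 - 32 = 32
//
// size_to_index_freeing(576) lands on the same index 32, so a freed 576-byte
// area is found again by the next 528-byte allocation.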

static inline header_t* tag_as_free(void* left) {
    return (header_t*)((uintptr_t)left | FREE_BIT);
}

// Returns true if this header_t is marked as free.
static inline bool is_tagged_as_free(const header_t* header) {
    // The free bit is stashed in the lower bit of header->left.
    return ((uintptr_t)(header->left) & FREE_BIT) != 0;
}

static inline header_t* untag(const void* left) {
    return (header_t*)((uintptr_t)left & ~HEADER_LEFT_BIT_MASK);
}

static inline header_t* right_header(header_t* header) {
    return (header_t*)((char*)header + header->size);
}

static inline void set_free_list_bit(int index) {
    theheap.free_list_bits[index >> 5] |= (1u << (31 - (index & 0x1f)));
}

static inline void clear_free_list_bit(int index) {
    theheap.free_list_bits[index >> 5] &= ~(1u << (31 - (index & 0x1f)));
}

static int find_nonempty_bucket(int index) {
    uint32_t mask = (1u << (31 - (index & 0x1f))) - 1;
    mask = mask * 2 + 1;
    mask &= theheap.free_list_bits[index >> 5];
    if (mask != 0) {
        return (index & ~0x1f) + __builtin_clz(mask);
    }
    for (index = ROUNDUP(index + 1, 32);
         index <= NUMBER_OF_BUCKETS; index += 32) {
        mask = theheap.free_list_bits[index >> 5];
        if (mask != 0u) {
            return index + __builtin_clz(mask);
        }
    }
    return -1;
}
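
// Illustrative sketch of the bit layout used above: bucket i maps to bit
// (31 - (i & 0x1f)) of free_list_bits[i >> 5], i.e. bucket 0 is the MSB of
// word 0, so __builtin_clz() of a masked word recovers the lowest-numbered
// nonempty bucket. For example, if only buckets 3 and 40 are nonempty
// (word 0 == 0x10000000, word 1 == 0x00800000), then find_nonempty_bucket(5)
// skips word 0 (nothing set at index >= 5) and returns
// 32 + clz(0x00800000) == 40.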

static bool is_start_of_os_allocation(const header_t* header) {
    return untag(header->left) == untag(NULL);
}

static void create_free_area(void* address, void* left, size_t size) {
    free_t* free_area = (free_t*)address;
    free_area->header.size = size;
    free_area->header.left = tag_as_free(left);

    int index = size_to_index_freeing(size - sizeof(header_t));
    set_free_list_bit(index);
    free_t** bucket = &theheap.free_lists[index];

    free_t* old_head = *bucket;
    if (old_head != NULL) {
        old_head->prev = free_area;
    }
    free_area->next = old_head;
    free_area->prev = NULL;
    *bucket = free_area;
    theheap.remaining += size;
#ifdef CMPCT_DEBUG
    memset(free_area + 1, FREE_FILL, size - sizeof(free_t));
#endif
}

static bool is_end_of_os_allocation(char* address) {
    return ((header_t*)address)->size == 0;
}

static void free_to_os(void* ptr, size_t size) {
    DEBUG_ASSERT(IS_PAGE_ALIGNED(ptr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
    heap_page_free(ptr, size >> PAGE_SIZE_SHIFT);
    theheap.size -= size;
}

// May call free_to_os(), or may cache the (non-large) OS allocation in
// cached_os_alloc. |left_sentinel| is the start of the OS allocation, and
// |total_size| is the (page-aligned) number of bytes that were originally
// allocated from the OS.
static void possibly_free_to_os(header_t *left_sentinel, size_t total_size) {
    if (theheap.cached_os_alloc == NULL) {
        LTRACEF("Keeping 0x%zx-byte OS alloc @%p\n", total_size, left_sentinel);
        theheap.cached_os_alloc = left_sentinel;
        theheap.cached_os_alloc->left = NULL;
        theheap.cached_os_alloc->size = total_size;
    } else {
        LTRACEF("Returning 0x%zx bytes @%p to OS\n",
                total_size, left_sentinel);
        free_to_os(left_sentinel, total_size);
    }
}

// Frees |size| bytes starting at |address|, either to a free bucket or to the
// OS (in which case the left/right sentinels are freed as well). |address|
// should point to what would be the header_t of the memory area to free, and
// |left| and |size| should be set to the values that the header_t would have
// contained. This is broken out because the header_t will not contain the
// proper size when coalescing neighboring areas.
static void free_memory(void* address, void* left, size_t size) {
    left = untag(left);
    if (IS_PAGE_ALIGNED(left) &&
        is_start_of_os_allocation(left) &&
        is_end_of_os_allocation((char*)address + size)) {

        // Assert that it's safe to do a simple 2 * sizeof(header_t) below.
        DEBUG_ASSERT_MSG(((header_t*)left)->size == sizeof(header_t),
                         "Unexpected left sentinel size %zu != header size %zu",
                         ((header_t*)left)->size, sizeof(header_t));
        possibly_free_to_os((header_t*)left, size + 2 * sizeof(header_t));
    } else {
        create_free_area(address, left, size);
    }
}

static void unlink_free(free_t* free_area, int bucket) {
    theheap.remaining -= free_area->header.size;
    ASSERT(theheap.remaining < 4000000000u);
    free_t* next = free_area->next;
    free_t* prev = free_area->prev;
    if (theheap.free_lists[bucket] == free_area) {
        theheap.free_lists[bucket] = next;
        if (next == NULL) {
            clear_free_list_bit(bucket);
        }
    }
    if (prev != NULL) {
        prev->next = next;
    }
    if (next != NULL) {
        next->prev = prev;
    }
}

static void unlink_free_unknown_bucket(free_t* free_area) {
    return unlink_free(
        free_area,
        size_to_index_freeing(free_area->header.size - sizeof(header_t)));
}

static void* create_allocation_header(
    void* address, size_t offset, size_t size, void* left) {

    header_t* standalone = (header_t*)((char*)address + offset);
    standalone->left = untag(left);
    standalone->size = size;
    return standalone + 1;
}

static void FixLeftPointer(header_t* right, header_t* new_left) {
    int tag = (uintptr_t)right->left & 1;
    right->left = (header_t*)(((uintptr_t)new_left & ~1) | tag);
}

static void check_free_fill(void* ptr, size_t size) {
    // The first 16 bytes of the region won't have free fill due to overlap
    // with the allocator bookkeeping.
    const size_t start = sizeof(free_t) - sizeof(header_t);
    for (size_t i = start; i < size; ++i) {
        uint8_t byte = ((uint8_t*)ptr)[i];
        if (byte != FREE_FILL) {
            platform_panic_start();
            printf("Heap free fill check fail.  Allocated region:\n");
            hexdump8(ptr, size);
            panic("allocating %lu bytes, fill was %02x, offset %lu\n",
                  size, byte, i);
        }
    }
}

#ifdef HEAP_ENABLE_TESTS

static void WasteFreeMemory(void) {
    while (theheap.remaining != 0) {
        cmpct_alloc(1);
    }
}

// If we just make a big allocation it gets rounded off.  If we actually
// want to use a reasonably accurate amount of memory for test purposes, we
// have to do many small allocations.
static void* TestTrimHelper(ssize_t target) {
    char* answer = NULL;
    size_t remaining = theheap.remaining;
    while (theheap.remaining - target > 512) {
        char* next_block = cmpct_alloc(8 + ((theheap.remaining - target) >> 2));
        *(char**)next_block = answer;
        answer = next_block;
        if (theheap.remaining > remaining) {
            return answer;
        }
        // Abandon attempt to hit particular freelist entry size if we
        // accidentally got more memory from the OS.
        remaining = theheap.remaining;
    }
    return answer;
}

static void TestTrimFreeHelper(char* block) {
    while (block) {
        char* next_block = *(char**)block;
        cmpct_free(block);
        block = next_block;
    }
}

static void cmpct_test_trim(void) {
    // XXX: Re-enable this test if we want, disabled due to float math
    return;
    WasteFreeMemory();

    size_t test_sizes[200];
    int sizes = 0;

    for (size_t s = 1; s < PAGE_SIZE * 4; s = (s + 1) * 1.1) {
        test_sizes[sizes++] = s;
        ASSERT(sizes < 200);
    }
    for (ssize_t s = -32; s <= 32; s += 8) {
        test_sizes[sizes++] = PAGE_SIZE + s;
        ASSERT(sizes < 200);
    }

    // Test allocations at the start of an OS allocation.
    for (int with_second_alloc = 0;
         with_second_alloc < 2; with_second_alloc++) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            char *a, *a2 = NULL;
            a = cmpct_alloc(s);
            if (with_second_alloc) {
                a2 = cmpct_alloc(1);
                if (s < (PAGE_SIZE >> 1)) {
                    // It is the intention of the test that a is at the start
                    // of an OS allocation and that a2 is "right after" it.
                    // Otherwise we are not testing what I thought. OS
                    // allocations are certainly not smaller than a page, so
                    // check in that case.
                    ASSERT((uintptr_t)(a2 - a) < s * 1.13 + 48);
                }
            }
            cmpct_trim();
            size_t remaining = theheap.remaining;
            // We should have < 1 page on either side of the a allocation.
            ASSERT(remaining < PAGE_SIZE * 2);
            cmpct_free(a);
            if (with_second_alloc) {
                // Now only a2 is holding onto the OS allocation.
                ASSERT(theheap.remaining > remaining);
            } else {
                ASSERT(theheap.remaining == 0);
            }
            remaining = theheap.remaining;
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            // If a was at least one page then the trim should have freed up
            // that page.
            if (s >= PAGE_SIZE && with_second_alloc) {
                ASSERT(theheap.remaining < remaining);
            }
            if (with_second_alloc) {
                cmpct_free(a2);
            }
        }
        ASSERT(theheap.remaining == 0);
    }

    ASSERT(theheap.remaining == 0);

    // Now test allocations near the end of an OS allocation.
    for (ssize_t wobble = -64; wobble <= 64; wobble += 8) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            if ((ssize_t)s + wobble < 0) {
                continue;
            }

            char* start_of_os_alloc = cmpct_alloc(1);

            // If the OS allocations are very small this test does not make
            // sense.
            if (theheap.remaining <= s + wobble) {
                cmpct_free(start_of_os_alloc);
                continue;
            }

            char* big_bit_in_the_middle = TestTrimHelper(s + wobble);
            size_t remaining = theheap.remaining;

            // If the remaining is big we started a new OS allocation and the
            // test makes no sense.
            if (remaining > 128 + s * 1.13 + wobble) {
                cmpct_free(start_of_os_alloc);
                TestTrimFreeHelper(big_bit_in_the_middle);
                continue;
            }

            cmpct_free(start_of_os_alloc);
            remaining = theheap.remaining;

            // This trim should sometimes trim a page off the end of the OS
            // allocation.
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            remaining = theheap.remaining;

            // We should have < 1 page on either side of the big allocation.
            ASSERT(remaining < PAGE_SIZE * 2);

            TestTrimFreeHelper(big_bit_in_the_middle);
        }
    }
}

static void cmpct_test_buckets(void) {
    size_t rounded;
    unsigned bucket;
    // Check for the 8-spaced buckets up to 128.
    for (unsigned i = 1; i <= 128; i++) {
        // Round up when allocating.
        bucket = size_to_index_allocating(i, &rounded);
        unsigned expected = (ROUNDUP(i, 8) >> 3) - 1;
        ASSERT(bucket == expected);
        ASSERT(IS_ALIGNED(rounded, 8));
        ASSERT(rounded >= i);
        if (i >= sizeof(free_t) - sizeof(header_t)) {
            // Once we get above the size of the free area struct (4 words), we
            // won't round up much for these small sizes.
            ASSERT(rounded - i < 8);
        }
        // Only rounded sizes are freed.
        if ((i & 7) == 0) {
            // Up to size 128 we have exact buckets for each multiple of 8.
            ASSERT(bucket == (unsigned)size_to_index_freeing(i));
        }
    }
    int bucket_base = 7;
    for (unsigned j = 16; j < 1024; j *= 2, bucket_base += 8) {
        // Note the "<=", which ensures that we test the powers of 2 twice to
        // ensure that both ways of calculating the bucket number match.
        for (unsigned i = j * 8; i <= j * 16; i++) {
            // Round up to j multiple in this range when allocating.
            bucket = size_to_index_allocating(i, &rounded);
            unsigned expected = bucket_base + ROUNDUP(i, j) / j;
            ASSERT(bucket == expected);
            ASSERT(IS_ALIGNED(rounded, j));
            ASSERT(rounded >= i);
            ASSERT(rounded - i < j);
            // Only 8-rounded sizes are freed or chopped off the end of a free
            // area when allocating.
            if ((i & 7) == 0) {
                // When freeing, if we don't hit the size of the bucket
                // precisely, we have to put the free space into a smaller
                // bucket, because the buckets have entries that will always
                // be big enough for the corresponding allocation size (so we
                // don't have to traverse the free chains to find a big enough
                // one).
                if ((i % j) == 0) {
                    ASSERT((int)bucket == size_to_index_freeing(i));
                } else {
                    ASSERT((int)bucket - 1 == size_to_index_freeing(i));
                }
            }
        }
    }
}

static void cmpct_test_get_back_newly_freed_helper(size_t size) {
    void* allocated = cmpct_alloc(size);
    if (allocated == NULL) {
        return;
    }
    char* allocated2 = cmpct_alloc(8);
    char* expected_position = (char*)allocated + size;
    if (allocated2 < expected_position ||
        allocated2 > expected_position + 128) {
        // If the allocated2 allocation is not in the same OS allocation as the
        // first allocation then the test may not work as expected (the memory
        // may be returned to the OS when we free the first allocation, and we
        // might not get it back).
        cmpct_free(allocated);
        cmpct_free(allocated2);
        return;
    }

    cmpct_free(allocated);
    void* allocated3 = cmpct_alloc(size);
    // To avoid churn and fragmentation we would want to get the newly freed
    // memory back again when we allocate the same size shortly after.
    ASSERT(allocated3 == allocated);
    cmpct_free(allocated2);
    cmpct_free(allocated3);
}

static void cmpct_test_get_back_newly_freed(void) {
    size_t increment = 16;
    for (size_t i = 128; i <= 0x8000000; i *= 2, increment *= 2) {
        for (size_t j = i; j < i * 2; j += increment) {
            cmpct_test_get_back_newly_freed_helper(i - 8);
            cmpct_test_get_back_newly_freed_helper(i);
            cmpct_test_get_back_newly_freed_helper(i + 1);
        }
    }
    for (size_t i = 1024; i <= 2048; i++) {
        cmpct_test_get_back_newly_freed_helper(i);
    }
}

static void cmpct_test_return_to_os(void) {
    cmpct_trim();
    size_t remaining = theheap.remaining;
    // This goes in a new OS allocation since the trim above removed any free
    // area big enough to contain it.
    void* a = cmpct_alloc(5000);
    void* b = cmpct_alloc(2500);
    cmpct_free(a);
    cmpct_free(b);
    // If things work as expected the new allocation is at the start of an OS
    // allocation.  There's just one sentinel and one header to the left of it.
    // If that's not the case then the allocation was met from some space in
    // the middle of an OS allocation, and our test won't work as expected, so
    // bail out.
    if (((uintptr_t)a & (PAGE_SIZE - 1)) != sizeof(header_t) * 2) {
        return;
    }
    // No trim needed when the entire OS allocation is free.
    ASSERT(remaining == theheap.remaining);
}

void cmpct_test(void) {
    cmpct_test_buckets();
    cmpct_test_get_back_newly_freed();
    cmpct_test_return_to_os();
    cmpct_test_trim();
    cmpct_dump(false);
    void* ptr[16];

    ptr[0] = cmpct_alloc(8);
    ptr[1] = cmpct_alloc(32);
    ptr[2] = cmpct_alloc(7);
    cmpct_trim();
    ptr[3] = cmpct_alloc(0);
    ptr[4] = cmpct_alloc(98713);
    ptr[5] = cmpct_alloc(16);

    cmpct_free(ptr[5]);
    cmpct_free(ptr[1]);
    cmpct_free(ptr[3]);
    cmpct_free(ptr[0]);
    cmpct_free(ptr[4]);
    cmpct_free(ptr[2]);

    cmpct_dump(false);
    cmpct_trim();
    cmpct_dump(false);

    int i;
    for (i = 0; i < 16; i++)
        ptr[i] = 0;

    for (i = 0; i < 32768; i++) {
        unsigned int index = (unsigned int)rand() % 16;

        if ((i % (16 * 1024)) == 0) {
            printf("pass %d\n", i);
        }

        // printf("index 0x%x\n", index);
        if (ptr[index]) {
            // printf("freeing ptr[0x%x] = %p\n", index, ptr[index]);
            cmpct_free(ptr[index]);
            ptr[index] = 0;
        }
        unsigned int align = 1 << ((unsigned int)rand() % 8);
        ptr[index] = cmpct_memalign((unsigned int)rand() % 32768, align);
        // printf("ptr[0x%x] = %p, align 0x%x\n", index, ptr[index], align);

        DEBUG_ASSERT(((addr_t)ptr[index] % align) == 0);
        // cmpct_dump(false);
    }

    for (i = 0; i < 16; i++) {
        if (ptr[i]) {
            cmpct_free(ptr[i]);
        }
    }

    cmpct_dump(false);
}

#else
void cmpct_test(void) {}
#endif  // HEAP_ENABLE_TESTS

void cmpct_trim(void) {
    // Look at free list entries that are at least as large as one page plus a
    // header. They might be at the start or the end of a block, so we can trim
    // them and free the page(s).
    lock();
    for (int bucket = size_to_index_freeing(PAGE_SIZE);
         bucket < NUMBER_OF_BUCKETS;
         bucket++) {
        free_t* next;
        for (free_t* free_area = theheap.free_lists[bucket];
             free_area != NULL;
             free_area = next) {
            DEBUG_ASSERT(
                free_area->header.size >= PAGE_SIZE + sizeof(header_t));
            next = free_area->next;
            header_t* right = right_header(&free_area->header);
            if (is_end_of_os_allocation((char*)right)) {
                char* old_os_allocation_end =
                    (char*)ROUNDUP((uintptr_t)right, PAGE_SIZE);
                // The page will end with a smaller free list entry and a
                // header-sized sentinel.
                char* new_os_allocation_end =
                    (char*)ROUNDUP(
                        (uintptr_t)free_area +
                            sizeof(header_t) +
                            sizeof(free_t),
                        PAGE_SIZE);
                size_t freed_up = old_os_allocation_end - new_os_allocation_end;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // Rare, because we only look at large freelist entries, but
                // unlucky rounding could mean we can't actually free anything
                // here.
                if (freed_up == 0) {
                    continue;
                }
                unlink_free(free_area, bucket);
                size_t new_free_size = free_area->header.size - freed_up;
                DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                // Right sentinel, not free, stops attempts to coalesce right.
                create_allocation_header(
                    free_area, new_free_size, 0, free_area);
                // Also puts it in the correct bucket.
                create_free_area(free_area, untag(free_area->header.left),
                                 new_free_size);
                heap_page_free(new_os_allocation_end,
                               freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            } else if (is_start_of_os_allocation(
                           untag(free_area->header.left))) {
                char* old_os_allocation_start =
                    (char*)ROUNDDOWN((uintptr_t)free_area, PAGE_SIZE);
                // For the sentinel, we need at least one header-size of space
                // between the page edge and the first allocation to the right
                // of the free area.
                char* new_os_allocation_start =
                    (char*)ROUNDDOWN((uintptr_t)(right - 1), PAGE_SIZE);
                size_t freed_up =
                    new_os_allocation_start - old_os_allocation_start;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // This should not happen because we only look at the large
                // free list buckets.
                if (freed_up == 0) {
                    continue;
                }
                unlink_free(free_area, bucket);
                size_t sentinel_size = sizeof(header_t);
                size_t new_free_size = free_area->header.size - freed_up;
                if (new_free_size < sizeof(free_t)) {
                    sentinel_size += new_free_size;
                    new_free_size = 0;
                }
                // Left sentinel, not free, stops attempts to coalesce left.
                create_allocation_header(new_os_allocation_start, 0,
                                         sentinel_size, NULL);
                if (new_free_size == 0) {
                    FixLeftPointer(right, (header_t*)new_os_allocation_start);
                } else {
                    DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                    char* new_free = new_os_allocation_start + sentinel_size;
                    // Also puts it in the correct bucket.
                    create_free_area(new_free, new_os_allocation_start,
                                     new_free_size);
                    FixLeftPointer(right, (header_t*)new_free);
                }
                heap_page_free(old_os_allocation_start,
                               freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            }
        }
    }
    unlock();
}

void* cmpct_alloc(size_t size) {
    if (size == 0u) {
        return NULL;
    }

    // Large allocations are no longer allowed. See ZX-1318 for details.
    if (size > (HEAP_LARGE_ALLOC_BYTES - sizeof(header_t))) {
        return NULL;
    }

    size_t rounded_up;
    int start_bucket = size_to_index_allocating(size, &rounded_up);

    rounded_up += sizeof(header_t);

    lock();
    int bucket = find_nonempty_bucket(start_bucket);
    if (bucket == -1) {
        // Grow heap by at least 12% if we can.
        size_t growby = MIN(HEAP_LARGE_ALLOC_BYTES,
                            MAX(theheap.size >> 3,
                                MAX(HEAP_GROW_SIZE, rounded_up)));
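        // For example (illustrative numbers only): with a 16 MiB heap and a
        // 64 KiB request this picks MIN(4 MiB, MAX(2 MiB, MAX(1 MiB, ~64 KiB)))
        // == 2 MiB, i.e. a roughly 12.5% growth step clamped to the
        // HEAP_LARGE_ALLOC_BYTES (4 MiB) ceiling.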
        // Try to add a new OS allocation to the heap, reducing the size until
        // we succeed or get too small.
        while (heap_grow(growby) < 0) {
            if (growby <= rounded_up) {
                unlock();
                return NULL;
            }
            growby = MAX(growby >> 1, rounded_up);
        }
        bucket = find_nonempty_bucket(start_bucket);
    }
    free_t* head = theheap.free_lists[bucket];
    size_t left_over = head->header.size - rounded_up;
    // We can't carve off the rest for a new free space if it's smaller than the
    // free-list linked structure.  We also don't carve it off if it's less than
    // 1.6% the size of the allocation.  This is to avoid small long-lived
    // allocations being placed right next to large allocations, hindering
    // coalescing and returning pages to the OS.
    if (left_over >= sizeof(free_t) && left_over > (size >> 6)) {
        header_t* right = right_header(&head->header);
        unlink_free(head, bucket);
        void* free = (char*)head + rounded_up;
        create_free_area(free, head, left_over);
        FixLeftPointer(right, (header_t*)free);
        head->header.size -= left_over;
    } else {
        unlink_free(head, bucket);
    }
    void* result =
        create_allocation_header(head, 0, head->header.size, head->header.left);
#ifdef CMPCT_DEBUG
    check_free_fill(result, size);
    memset(result, ALLOC_FILL, size);
    memset(((char*)result) + size, PADDING_FILL,
           rounded_up - size - sizeof(header_t));
#endif
    unlock();
    return result;
}

void* cmpct_memalign(size_t size, size_t alignment) {
    if (alignment < 8) {
        return cmpct_alloc(size);
    }

    size_t padded_size =
        size + alignment + sizeof(free_t) + sizeof(header_t);

    char* unaligned = (char*)cmpct_alloc(padded_size);
    if (unaligned == NULL) {
        return NULL;
    }

    lock();
    size_t mask = alignment - 1;
    uintptr_t payload_int = (uintptr_t)unaligned + sizeof(free_t) +
                            sizeof(header_t) + mask;
    char* payload = (char*)(payload_int & ~mask);
    if (unaligned != payload) {
        header_t* unaligned_header = (header_t*)unaligned - 1;
        header_t* header = (header_t*)payload - 1;
        size_t left_over = payload - unaligned;
        create_allocation_header(
            header, 0, unaligned_header->size - left_over, unaligned_header);
        header_t* right = right_header(unaligned_header);
        unaligned_header->size = left_over;
        FixLeftPointer(right, header);
        unlock();
        cmpct_free(unaligned);
    } else {
        unlock();
    }
    // TODO: Free the part after the aligned allocation.
    return payload;
}
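
// Sketch of the arithmetic above (illustrative only, assuming a 64-bit build
// where sizeof(header_t) == 16 and sizeof(free_t) == 32): the request is
// padded by alignment + 48 bytes, and the aligned payload is chosen at least
// sizeof(free_t) + sizeof(header_t) bytes past |unaligned|. That way, when the
// payload has to move, the skipped-over gap is always big enough
// (>= sizeof(free_t)) to stand alone as its own memory area and be handed to
// cmpct_free().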

void cmpct_free(void* payload) {
    if (payload == NULL) {
        return;
    }
    header_t* header = (header_t*)payload - 1;
    DEBUG_ASSERT(!is_tagged_as_free(header)); // Double free!
    size_t size = header->size;
    lock();
    header_t* left = header->left;
    if (left != NULL && is_tagged_as_free(left)) {
        // Coalesce with left free object.
        unlink_free_unknown_bucket((free_t*)left);
        header_t* right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce both sides.
            unlink_free_unknown_bucket((free_t*)right);
            header_t* right_right = right_header(right);
            FixLeftPointer(right_right, left);
            free_memory(left, left->left, left->size + size + right->size);
        } else {
            // Coalesce only left.
            FixLeftPointer(right, left);
            free_memory(left, left->left, left->size + size);
        }
    } else {
        header_t* right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce only right.
            header_t* right_right = right_header(right);
            unlink_free_unknown_bucket((free_t*)right);
            FixLeftPointer(right_right, header);
            free_memory(header, left, size + right->size);
        } else {
            free_memory(header, left, size);
        }
    }
    unlock();
}

void* cmpct_realloc(void* payload, size_t size) {
    if (payload == NULL) {
        return cmpct_alloc(size);
    }
    header_t* header = (header_t*)payload - 1;
    size_t old_size = header->size - sizeof(header_t);

    void* new_payload = cmpct_alloc(size);
    if (new_payload == NULL) {
        return NULL;
    }

    memcpy(new_payload, payload, MIN(size, old_size));
    cmpct_free(payload);
    return new_payload;
}

static void add_to_heap(void* new_area, size_t size) {
    void* top = (char*)new_area + size;
    // Set up the left sentinel. Its |left| field will not have FREE_BIT set,
    // stopping attempts to coalesce left.
    header_t* left_sentinel = (header_t*)new_area;
    create_allocation_header(left_sentinel, 0, sizeof(header_t), NULL);

    // Set up the usable memory area, which will be marked free.
    header_t* new_header = left_sentinel + 1;
    size_t free_size = size - 2 * sizeof(header_t);
    create_free_area(new_header, left_sentinel, free_size);

    // Set up the right sentinel. Its |left| field will not have FREE_BIT bit
    // set, stopping attempts to coalesce right.
    header_t* right_sentinel = (header_t*)(top - sizeof(header_t));
    create_allocation_header(right_sentinel, 0, 0, new_header);
}
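
// Worked example of the layout add_to_heap() produces (illustrative only,
// assuming a 16-byte header_t and a 1 MiB OS allocation):
//
//   offset 0x00000: left sentinel, size 16, left = NULL
//   offset 0x00010: free area, size 0x100000 - 32, chained into its bucket
//   offset 0xffff0: right sentinel, size 0, left = the free area
//
// This matches the "OS allocation" diagram in the header comment.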

// Create a new free-list entry of at least size bytes (including the
// allocation header).  Called with the lock, apart from during init.
static ssize_t heap_grow(size_t size) {
    // The new free list entry will have a header on each side (the
    // sentinels) so we need to grow the gross heap size by this much more.
    size += 2 * sizeof(header_t);
    size = ROUNDUP(size, PAGE_SIZE);

    void* ptr = NULL;

    header_t* os_alloc = (header_t*)theheap.cached_os_alloc;
    if (os_alloc != NULL) {
        if (os_alloc->size >= size) {
            LTRACEF("Using saved 0x%zx-byte OS alloc @%p (>=0x%zx bytes)\n",
                    os_alloc->size, os_alloc, size);
            ptr = os_alloc;
            size = os_alloc->size;
            DEBUG_ASSERT_MSG(IS_PAGE_ALIGNED(ptr),
                             "0x%zx bytes @%p", size, ptr);
            DEBUG_ASSERT_MSG(IS_PAGE_ALIGNED(size),
                             "0x%zx bytes @%p", size, ptr);
        } else {
            // We need to allocate more from the OS. Return the cached OS
            // allocation, in case we're holding an unusually-small block
            // that's unlikely to satisfy future calls to heap_grow().
            LTRACEF("Returning too-small saved 0x%zx-byte OS alloc @%p "
                    "(<0x%zx bytes)\n",
                    os_alloc->size, os_alloc, size);
            free_to_os(os_alloc, os_alloc->size);
        }
        theheap.cached_os_alloc = NULL;
    }
    if (ptr == NULL) {
        ptr = heap_page_alloc(size >> PAGE_SIZE_SHIFT);
        if (ptr == NULL) {
            return ZX_ERR_NO_MEMORY;
        }
        LTRACEF("Growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);
        theheap.size += size;
    }

    add_to_heap(ptr, size);

    return size;
}

void cmpct_init(void) {
    LTRACE_ENTRY;

    // Create a mutex.
    mutex_init(&theheap.lock);

    // Initialize the free list.
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        theheap.free_lists[i] = NULL;
    }
    for (int i = 0; i < BUCKET_WORDS; i++) {
        theheap.free_list_bits[i] = 0;
    }

    size_t initial_alloc = HEAP_GROW_SIZE - 2 * sizeof(header_t);

    theheap.remaining = 0;

    heap_grow(initial_alloc);
}
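
// Illustrative usage sketch (not compiled here). It only uses the entry
// points defined above, with the signatures declared in <lib/cmpctmalloc.h>;
// in the kernel these are typically reached through the lib/heap wrappers
// (see <lib/heap.h>) rather than called directly.
//
//   cmpct_init();                       // one-time setup, grows the heap once
//   void* p = cmpct_alloc(500);         // rounded up to a bucket size
//   p = cmpct_realloc(p, 1000);         // always alloc + copy + free
//   void* q = cmpct_memalign(100, 64);  // payload aligned to 64 bytes
//   cmpct_free(q);
//   cmpct_free(p);
//   cmpct_trim();                       // return whole free pages to the OS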