1
2/*
3 * K&R Malloc
4 *
5 * System specifc code should implement `more_core'
6 */
7#include "k_r_malloc.h"
8#include <stddef.h> /* For NULL */
9#include <stdlib.h>
10#include <string.h> /* For memcpy */
11
12#include <barrelfish/barrelfish.h>
13#include <barrelfish/core_state.h> /* XXX */
14
/*
 * Replacement-allocator hooks: when non-NULL, malloc()/free() below
 * delegate to these and the K&R free-list implementation is bypassed
 * entirely.
 */
typedef void *(*alt_malloc_t)(size_t bytes);
alt_malloc_t alt_malloc = NULL;

typedef void (*alt_free_t)(void *p);
alt_free_t alt_free = NULL;

/* Serialize access to the per-dispatcher free list.  A local variable
 * named `state' (struct morecore_state *) must be in scope at every
 * use site. */
#define MALLOC_LOCK thread_mutex_lock(&state->mutex)
#define MALLOC_UNLOCK thread_mutex_unlock(&state->mutex)

#ifdef CONFIG_MALLOC_INSTRUMENT
/* Running count of currently-allocated units (one unit = sizeof(Header)). */
size_t __malloc_instrumented_allocated;
#endif

#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
#include <stdio.h>
#include <assert.h>
/* Free-list consistency check and dumper, defined at the end of this file. */
int __malloc_check(void);
void __malloc_dump(void);
#endif
34
35/*
36 * malloc: general-purpose storage allocator
37 */
38void *
39malloc(size_t nbytes)
40{
41    if (alt_malloc != NULL) {
42        return alt_malloc(nbytes);
43    }
44
45    struct morecore_state *state = get_morecore_state();
46	Header *p, *prevp;
47	unsigned nunits;
48	nunits = (nbytes + sizeof(Header) - 1) / sizeof(Header) + 1;
49
50	MALLOC_LOCK;
51	if ((prevp = state->header_freep) == NULL) {	/* no free list yet */
52		state->header_base.s.ptr = state->header_freep = prevp = &state->header_base;
53		state->header_base.s.size = 0;
54	}
55	for (p = prevp->s.ptr;; prevp = p, p = p->s.ptr) {
56		if (p->s.size >= nunits) {	/* big enough */
57			if (p->s.size == nunits)	/* exactly */
58				prevp->s.ptr = p->s.ptr;
59			else {	/* allocate tail end */
60				p->s.size -= nunits;
61				p += p->s.size;
62				p->s.size = nunits;
63			}
64            p->s.magic = 0xdeadbeef;
65			state->header_freep = prevp;
66#ifdef CONFIG_MALLOC_DEBUG
67			{
68				/* Write bit pattern over data */
69				char *x = (char *) (p + 1);
70				int i;
71				for (i = 0; i < nbytes; i++)
72					x[i] = 0xd0;
73			}
74#endif
75
76#ifdef CONFIG_MALLOC_INSTRUMENT
77			__malloc_instrumented_allocated += nunits;
78#endif
79#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
80			if (__malloc_check() != 0) {
81				printf("malloc %lu %p\n", nbytes, (void *) (p + 1));
82				__malloc_dump();
83				assert(__malloc_check() == 0);
84			}
85#endif
86			MALLOC_UNLOCK;
87			return (void *) (p + 1);
88		}
89		if (p == state->header_freep) {	/* wrapped around free list */
90			if ((p = (Header *) morecore(nunits)) == NULL) {
91				MALLOC_UNLOCK;
92				return NULL;	/* none left */
93			} else {
94
95			}
96		}
97	}
98	MALLOC_UNLOCK;
99}
100
101/*
102 * free: put block ap in free list
103 */
/*
 * __free_locked: put block ap in free list.
 *
 * Must be called with the malloc lock held, and with `ap' either NULL
 * or a pointer previously returned by malloc().  Walks the circular,
 * address-ordered free list to find the insertion point, then coalesces
 * the freed block with its upper and/or lower neighbour when they are
 * physically adjacent.
 */
void
__free_locked(void *ap)
{
    struct morecore_state *state = get_morecore_state();
	Header *bp, *p;

	if (ap == NULL)
		return;

	bp = (Header *) ap - 1;	/* point to block header */
	/* Find p such that bp lies between p and p->s.ptr in address
	 * order.  The inner test handles the wrap-around point of the
	 * circular list (p >= p->s.ptr), where a block at the very start
	 * or end of the arena belongs. */
	for (p = state->header_freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
		if (p >= p->s.ptr && (bp > p || bp < p->s.ptr))
			break;	/* freed block at start or end of arena */

#ifdef CONFIG_MALLOC_INSTRUMENT
	__malloc_instrumented_allocated -= bp->s.size;
#endif

	if (bp + bp->s.size == p->s.ptr) {	/* join to upper nbr */
		bp->s.size += p->s.ptr->s.size;
		bp->s.ptr = p->s.ptr->s.ptr;
	} else {
		bp->s.ptr = p->s.ptr;
	}

	if (p + p->s.size == bp) {	/* join to lower nbr */
		p->s.size += bp->s.size;
		p->s.ptr = bp->s.ptr;
	} else {
		p->s.ptr = bp;
	}

	/* Restart future searches just before the freed block. */
	state->header_freep = p;

#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
	if (__malloc_check() != 0) {
		printf("free %p\n", ap);
		__malloc_dump();
		assert(__malloc_check() == 0);
	}
#endif
}
146
147void free(void *ap)
148{
149    if (ap == NULL) {
150        return;
151    }
152
153
154    if (alt_free != NULL) {
155        return alt_free(ap);
156    }
157
158    struct morecore_state *state = get_morecore_state();
159
160#ifdef __x86_64__
161    /* XXX: Since dispatchers on different cores maintain different malloc arena,
162     * we detect instances when one dispatcher tries to free memory not in it's
163     * arena and leak it
164     */
165    lvaddr_t base = vregion_get_base_addr(&state->mmu_state.vregion);
166    lvaddr_t limit = base + vregion_get_size(&state->mmu_state.vregion);
167
168    if ((lvaddr_t)ap < base || (lvaddr_t)ap >= limit) {
169        if (X86_64_PML4_BASE(ap) != X86_64_PML4_BASE(base)) {
170            return;
171        }
172    }
173
174    assert((lvaddr_t)ap >= base && (lvaddr_t)ap < limit);
175#endif
176
177    if (((Header *)ap)[-1].s.magic != 0xdeadbeef) {
178        debug_printf("%s: Trying to free not malloced region %p by %p\n",
179            __func__, ap, __builtin_return_address(0));
180        return;
181    }
182    ((Header *)ap)[-1].s.magic = 0;
183    MALLOC_LOCK;
184    __free_locked(ap);
185    lesscore();
186    MALLOC_UNLOCK;
187}
188
189#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
190
191int
192__malloc_check(void)
193{
194    struct morecore_state *state = get_morecore_state();
195	Header *p, *prevp;
196	if ((prevp = state->header_freep) == NULL) {	/* no free list yet */
197		return 0;
198	}
199	for (p = prevp->s.ptr;; prevp = p, p = p->s.ptr) {
200		if ((void*) p == NULL) {
201			return 1;
202		}
203		/* Free bits should be in order */
204		if (p > p->s.ptr && p->s.ptr != &state->header_base) {
205			return 1;
206		}
207		if ((uintptr_t) p + (p->s.size * sizeof(Header)) > (uintptr_t) p->s.ptr && p->s.ptr != &state->header_base) {
208			return 1;
209		}
210		/* shouldn't have zero sized free bits */
211		if (p->s.size == 0 && p != &state->header_base) {
212			return 1;
213		}
214		if (p == state->header_freep) {	/* wrapped around free list */
215			break;
216		}
217	}
218	return 0;
219}
220
221void
222__malloc_dump(void)
223{
224    struct morecore_state *state = get_morecore_state();
225	Header *p, *prevp;
226	if ((prevp = state->header_freep) == NULL) {	/* no free list yet */
227		return;
228	}
229        printf("Malloc dump\n"
230               "We expect the free list to be sorted from low to high addresses\n"
231               "with no item overlapping another item and no empty items.\n"
232               "Legend:\n"
233               "* Successor in list is at lower address than current item\n"
234               "# Item has size 0\n"
235               "$ This item overlaps (base + size) the next item's base\n");
236        printf("List base at %p, freep at %p\n", &state->header_base,
237               state->header_freep);
238	for (p = prevp->s.ptr;; prevp = p, p = p->s.ptr) {
239		if (p > p->s.ptr && p->s.ptr != &state->header_base) {
240			printf("* ");
241		}
242		if (p->s.size == 0 && p != &state->header_base) {
243			printf("# ");
244		}
245		if ((uintptr_t) p + (p->s.size * sizeof(Header)) > (uintptr_t) p->s.ptr && p->s.ptr != &state->header_base) {
246			printf("$ ");
247		}
248		if (p == &state->header_base) {
249			printf(" p: <base>\n");
250		} else {
251			printf(" p: %p (%d) -> %p\n", p, p->s.size, p->s.ptr);
252		}
253		assert(p != NULL);
254		if (p == state->header_freep) {	/* wrapped around free list */
255			return;
256		}
257	}
258}
259#endif
260