/*
 * Copyright 2002-2007, Haiku Inc.
 * Distributed under the terms of the MIT License.
 */

/* Hoard: A Fast, Scalable, and Memory-Efficient Allocator
 * 		for Shared-Memory Multiprocessors
 * Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
 *
 * Copyright (c) 1998-2000, The University of Texas at Austin.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation, http://www.fsf.org.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 */

#include "config.h"
#include "threadheap.h"
#include "processheap.h"
#include "arch-specific.h"

#include <image.h>

#include <errno.h>
#include <string.h>

#include <errno_private.h>
#include <user_thread.h>

#include "tracing_config.h"

using namespace BPrivate;

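// KTRACE() forwards a printf-style message to the kernel tracing facility
// when user malloc tracing is enabled, and compiles away otherwise.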
#if USER_MALLOC_TRACING
#	define KTRACE(format...)	ktrace_printf(format)
#else
#	define KTRACE(format...)	do {} while (false)
#endif


#if HEAP_LEAK_CHECK
static block* sUsedList = NULL;
static hoardLockType sUsedLock = 0;


/*!
	Finds the closest symbol that comes before the given address.
*/
static status_t
get_symbol_for_address(void* address, char *imageBuffer, size_t imageBufferSize,
	char* buffer, size_t bufferSize, int32& offset)
{
	offset = -1;

	image_info info;
	int32 cookie = 0;
	while (get_next_image_info(0, &cookie, &info) == B_OK) {
		// skip images whose text and data segments do not contain the address
		if (((addr_t)info.text > (addr_t)address
				|| (addr_t)info.text + info.text_size < (addr_t)address)
			&& ((addr_t)info.data > (addr_t)address
				|| (addr_t)info.data + info.data_size < (addr_t)address))
			continue;

		char name[256];
		int32 index = 0;
		int32 nameLength = sizeof(name);
		int32 symbolType;
		void* location;
		while (get_nth_image_symbol(info.id, index, name, &nameLength,
				&symbolType, &location) == B_OK) {
			if ((addr_t)address >= (addr_t)location) {
				// see if this is better than what we have
				int32 newOffset = (addr_t)address - (addr_t)location;

				if (offset == -1 || offset > newOffset) {
					const char* imageName = strrchr(info.name, '/');
					if (imageName != NULL)
						strlcpy(imageBuffer, imageName + 1, imageBufferSize);
					else
						strlcpy(imageBuffer, info.name, imageBufferSize);

					strlcpy(buffer, name, bufferSize);
					offset = newOffset;
				}
			}

			nameLength = sizeof(name);
			index++;
		}
	}

	return offset != -1 ? B_OK : B_ENTRY_NOT_FOUND;
}

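/*!
	Prints one tracked allocation to stdout, including the recorded call
	stack with each return address resolved to an image and symbol name
	where possible.
*/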
static void
dump_block(block* b)
{
	printf("  %p, %ld bytes: call stack", b + 1, b->getAllocatedSize());

	for (int i = 0; i < HEAP_CALL_STACK_SIZE; i++) {
		if (b->getCallStack(i) != NULL) {
			char image[256];
			char name[256];
			int32 offset;
			if (get_symbol_for_address(b->getCallStack(i), image, sizeof(image),
					name, sizeof(name), offset) != B_OK) {
				strcpy(name, "???");
				offset = 0;
			}

			printf(": %p (%s:%s+0x%lx)", b->getCallStack(i), image, name, offset);
		}
	}
	putchar('\n');
}


extern "C" void __dump_allocated(void);

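/*!
	Walks the used list and dumps every tracked allocation. The function is
	exported with C linkage so that it can also be called by hand, for
	example from a debugger, when hunting memory leaks.
*/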
extern "C" void
__dump_allocated(void)
{
	hoardLock(sUsedLock);

	puts("allocated:\n");

	block* b = sUsedList;
	while (b != NULL) {
		dump_block(b);

		b = b->getNext();
	}

	hoardUnlock(sUsedLock);
}

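/*!
	Links a freshly allocated block into the used list and records its size
	and, where the architecture supports stack crawling, the caller's call
	stack.
*/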
static void
add_address(void* address, size_t size)
{
	block *b = (block *)address - 1;

#ifdef __INTEL__
	// walk the stack frames and record the callers' return addresses
	struct stack_frame {
		struct stack_frame*	previous;
		void*				return_address;
	};

	stack_frame* frame = (stack_frame*)get_stack_frame();

	for (int i = 0; i < HEAP_CALL_STACK_SIZE; i++) {
		if (frame != NULL) {
			b->setCallStack(i, frame->return_address);
			frame = frame->previous;
		} else
			b->setCallStack(i, NULL);
	}
#else
	// no stack crawling support on this architecture
	for (int i = 0; i < HEAP_CALL_STACK_SIZE; i++)
		b->setCallStack(i, NULL);
#endif

	// record the size unconditionally, so that dump_block() prints valid
	// sizes on every architecture
	b->setAllocatedSize(size);

	hoardLock(sUsedLock);

	b->setNext(sUsedList);
	sUsedList = b;

	hoardUnlock(sUsedLock);
}

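/*!
	Unlinks a block from the used list on free(). The list is singly
	linked, so anything but the most recently allocated block requires a
	linear search.
*/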
static void
remove_address(void* address)
{
	block* b = (block *)address - 1;
	hoardLock(sUsedLock);

	if (sUsedList == b) {
		// we're lucky, it's the first block in the list
		sUsedList = b->getNext();
	} else {
		// search for block in the used list (very slow!)
		block* last = sUsedList;
		while (last != NULL && last->getNext() != b) {
			last = last->getNext();
		}

		if (last == NULL) {
			printf("freed block not in used list!\n");
			dump_block(b);
		} else
			last->setNext(b->getNext());
	}

	hoardUnlock(sUsedLock);
}

#endif	// HEAP_LEAK_CHECK

#if HEAP_WALL

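/*!
	Brackets an allocation with guard walls so that out-of-bounds writes
	can be detected when the block is freed. The raw block is laid out as

		[size | 0x88 fill] [user data] [0x66 fill]
		 front wall                     back wall

	with the total block size stored in the first word of the front wall.
	Returns the pointer to hand out to the application, i.e. the first
	byte after the front wall.
*/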
static void*
set_wall(void* addr, size_t size)
{
	size_t *start = (size_t*)addr;

	start[0] = size;
	memset(start + 1, 0x88, HEAP_WALL_SIZE - sizeof(size_t));
	memset((uint8*)addr + size - HEAP_WALL_SIZE, 0x66, HEAP_WALL_SIZE);

	return (uint8*)addr + HEAP_WALL_SIZE;
}

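/*!
	Verifies both guard walls of the given application pointer and invokes
	the debugger if either one has been overwritten. Returns the start of
	the raw block (the front wall) for the actual free operation.
*/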
static void*
check_wall(uint8* buffer)
{
	buffer -= HEAP_WALL_SIZE;
	size_t size = *(size_t*)buffer;

	if (threadHeap::objectSize(buffer) < size)
		debugger("invalid size");

	for (size_t i = 0; i < HEAP_WALL_SIZE; i++) {
		if (i >= sizeof(size_t) && buffer[i] != 0x88) {
			debug_printf("allocation %p, size %ld front wall clobbered at byte %ld.\n",
				buffer + HEAP_WALL_SIZE, size - 2 * HEAP_WALL_SIZE, i);
			debugger("front wall clobbered");
		}
		if (buffer[i + size - HEAP_WALL_SIZE] != 0x66) {
			debug_printf("allocation %p, size %ld back wall clobbered at byte %ld.\n",
				buffer + HEAP_WALL_SIZE, size - 2 * HEAP_WALL_SIZE, i);
			debugger("back wall clobbered");
		}
	}

	return buffer;
}

#endif	// HEAP_WALL

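/*!
	Returns the process-wide allocator, constructing it on first use. The
	allocator obviously cannot allocate itself, so its backing memory is
	obtained directly via hoardSbrk() and the object is built in place
	with placement new.
*/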
inline static processHeap *
getAllocator(void)
{
	static char *buffer = (char *)hoardSbrk(sizeof(processHeap));
	static processHeap *theAllocator = new (buffer) processHeap;

	return theAllocator;
}


//	#pragma mark - public functions


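// All entry points below share the same pattern: signal delivery is
// deferred around the actual heap operation, so that a signal handler
// that itself calls malloc()/free() cannot re-enter the heap while its
// locks are held.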
extern "C" void *
malloc(size_t size)
{
	static processHeap *pHeap = getAllocator();

#if HEAP_WALL
	size += 2 * HEAP_WALL_SIZE;
#endif

	defer_signals();

	void *addr = pHeap->getHeap(pHeap->getHeapIndex()).malloc(size);
	if (addr == NULL) {
		undefer_signals();
		__set_errno(B_NO_MEMORY);
		KTRACE("malloc(%lu) -> NULL", size);
		return NULL;
	}

#if HEAP_LEAK_CHECK
	add_address(addr, size);
#endif

	undefer_signals();

#if HEAP_WALL
	addr = set_wall(addr, size);
#endif

	KTRACE("malloc(%lu) -> %p", size, addr);

	return addr;
}

extern "C" void *
calloc(size_t nelem, size_t elsize)
{
	static processHeap *pHeap = getAllocator();
	size_t size = nelem * elsize;

	// guard against overflow in the multiplication above
	if (elsize != 0 && size / elsize != nelem) {
		__set_errno(B_NO_MEMORY);
		KTRACE("calloc(%lu, %lu) -> NULL", nelem, elsize);
		return NULL;
	}

#if HEAP_WALL
	size += 2 * HEAP_WALL_SIZE;
#endif

	defer_signals();

	void *ptr = pHeap->getHeap(pHeap->getHeapIndex()).malloc(size);
	if (ptr == NULL) {
		undefer_signals();
		__set_errno(B_NO_MEMORY);
		KTRACE("calloc(%lu, %lu) -> NULL", nelem, elsize);
		return NULL;
	}

#if HEAP_LEAK_CHECK
	add_address(ptr, size);
#endif

	undefer_signals();

#if HEAP_WALL
	ptr = set_wall(ptr, size);
	size -= 2 * HEAP_WALL_SIZE;
#endif

	// Zero out the malloc'd block.
	memset(ptr, 0, size);
	KTRACE("calloc(%lu, %lu) -> %p", nelem, elsize, ptr);
	return ptr;
}


extern "C" void
free(void *ptr)
{
	static processHeap *pHeap = getAllocator();

#if HEAP_WALL
	if (ptr == NULL)
		return;
	KTRACE("free(%p)", ptr);
	ptr = check_wall((uint8*)ptr);
#else
	KTRACE("free(%p)", ptr);
#endif

	defer_signals();

#if HEAP_LEAK_CHECK
	if (ptr != NULL)
		remove_address(ptr);
#endif
	pHeap->free(ptr);

	undefer_signals();
}


extern "C" void *
memalign(size_t alignment, size_t size)
{
	static processHeap *pHeap = getAllocator();

#if HEAP_WALL
	debug_printf("memalign() is not yet supported by the wall code.\n");
	return NULL;
#endif

	defer_signals();

	void *addr = pHeap->getHeap(pHeap->getHeapIndex()).memalign(alignment,
		size);
	if (addr == NULL) {
		undefer_signals();
		__set_errno(B_NO_MEMORY);
		KTRACE("memalign(%lu, %lu) -> NULL", alignment, size);
		return NULL;
	}

#if HEAP_LEAK_CHECK
	add_address(addr, size);
#endif

	undefer_signals();

	KTRACE("memalign(%lu, %lu) -> %p", alignment, size, addr);
	return addr;
}


extern "C" int
posix_memalign(void **_pointer, size_t alignment, size_t size)
{
	if ((alignment & (sizeof(void *) - 1)) != 0 || _pointer == NULL)
		return B_BAD_VALUE;

#if HEAP_WALL
	debug_printf("posix_memalign() is not yet supported by the wall code.\n");
	return -1;
#endif

	static processHeap *pHeap = getAllocator();

	defer_signals();

	void *pointer = pHeap->getHeap(pHeap->getHeapIndex()).memalign(alignment,
		size);
	if (pointer == NULL) {
		undefer_signals();
		KTRACE("posix_memalign(%p, %lu, %lu) -> NULL", _pointer, alignment,
			size);
		return B_NO_MEMORY;
	}

#if HEAP_LEAK_CHECK
	add_address(pointer, size);
#endif

	undefer_signals();

	*_pointer = pointer;
	KTRACE("posix_memalign(%p, %lu, %lu) -> %p", _pointer, alignment, size,
		pointer);
	return 0;
}


extern "C" void *
valloc(size_t size)
{
	return memalign(B_PAGE_SIZE, size);
}

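/*!
	Resizes an allocation. If the underlying block is already large enough
	it is reused in place; otherwise a new block is allocated, the data is
	copied over, and the old block is freed.
*/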
extern "C" void *
realloc(void *ptr, size_t size)
{
	if (ptr == NULL)
		return malloc(size);

	if (size == 0) {
		free(ptr);
		return NULL;
	}

	// If the existing object can hold the new size,
	// just return it.

#if HEAP_WALL
	size += 2 * HEAP_WALL_SIZE;
	ptr = (uint8*)ptr - HEAP_WALL_SIZE;
#endif

	size_t objSize = threadHeap::objectSize(ptr);
	if (objSize >= size) {
#if HEAP_WALL
		check_wall((uint8*)ptr + HEAP_WALL_SIZE);
		ptr = set_wall(ptr, size);
#endif
		KTRACE("realloc(%p, %lu) -> %p", ptr, size, ptr);
		return ptr;
	}

#if HEAP_WALL
	size -= 2 * HEAP_WALL_SIZE;
	objSize -= 2 * HEAP_WALL_SIZE;
	ptr = (uint8*)ptr + HEAP_WALL_SIZE;
#endif

	// Allocate a new block of the requested size.
	void *buffer = malloc(size);
	if (buffer == NULL) {
		// Allocation failed, leave the old block untouched.
		__set_errno(B_NO_MEMORY);
		KTRACE("realloc(%p, %lu) -> NULL", ptr, size);
		return NULL;
	}

	// Copy the contents of the original object
	// up to the size of the new block.

	size_t minSize = (objSize < size) ? objSize : size;
	memcpy(buffer, ptr, minSize);

	// Free the old block.
	free(ptr);

	// Return a pointer to the new one.
	KTRACE("realloc(%p, %lu) -> %p", ptr, size, buffer);
	return buffer;
}

//	#pragma mark - BeOS specific extensions


struct mstats {
	size_t bytes_total;
	size_t chunks_used;
	size_t bytes_used;
	size_t chunks_free;
	size_t bytes_free;
};

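/*!
	Returns allocator statistics summed over all size classes. Note that
	"chunks" counts size classes that are in use, not individual
	allocations.
*/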
extern "C" struct mstats mstats(void);

extern "C" struct mstats
mstats(void)
{
	// Note: the static stats structure is not protected by a lock, but
	// for a rough diagnostic interface that hardly matters.
	processHeap *heap = getAllocator();
	static struct mstats stats;

	int allocated = 0;
	int used = 0;
	int chunks = 0;

	for (int i = 0; i < hoardHeap::SIZE_CLASSES; i++) {
		int classUsed, classAllocated;
		heap->getStats(i, classUsed, classAllocated);

		if (classUsed > 0)
			chunks++;

		allocated += classAllocated;
		used += classUsed;
	}

	stats.bytes_total = allocated;
	stats.chunks_used = chunks;
	stats.bytes_used = used;
	stats.chunks_free = hoardHeap::SIZE_CLASSES - chunks;
	stats.bytes_free = allocated - used;

	return stats;
}