/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include <stdio.h>
#include <string.h>

#include <arch/debug.h>
#include <elf.h>
#include <debug.h>
#include <heap.h>
#include <malloc.h>
#include <slab/Slab.h>
#include <team.h>
#include <tracing.h>
#include <util/list.h>
#include <util/AutoLock.h>
#include <vm/vm.h>


#if USE_GUARDED_HEAP_FOR_MALLOC


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08

#define GUARDED_HEAP_STACK_TRACE_DEPTH	0


struct guarded_heap;

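// Per-page bookkeeping. Every page of a heap area has one of these entries:
// the first page of an allocation carries the allocation metadata, and the
// trailing guard page of each allocation is protected against any access so
// that overruns fault immediately instead of silently corrupting memory.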
struct guarded_heap_page {
	uint8				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	team_id				team;
	thread_id			thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	size_t				stack_trace_depth;
	addr_t				stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
#endif
	list_link			free_list_link;
};

struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	void*				protection_cookie;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	int32				area_creation_counter;
	guarded_heap_area*	areas;
};


static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, NULL
};


#if GUARDED_HEAP_TRACING

namespace GuardedHeapTracing {


class GuardedHeapTraceEntry
	: public TRACE_ENTRY_SELECTOR(GUARDED_HEAP_TRACING_STACK_TRACE) {
	public:
		GuardedHeapTraceEntry(guarded_heap* heap)
			:
			TraceEntryBase(GUARDED_HEAP_TRACING_STACK_TRACE, 0, true),
			fHeap(heap)
		{
		}

	protected:
		guarded_heap*	fHeap;
};


class Allocate : public GuardedHeapTraceEntry {
	public:
		Allocate(guarded_heap* heap, void* pageBase, uint32 flags)
			:
			GuardedHeapTraceEntry(heap),
			fPageBase(pageBase),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("guarded heap allocate: heap: %p; page: %p; "
				"flags:%s%s%s%s", fHeap, fPageBase,
				(fFlags & GUARDED_HEAP_PAGE_FLAG_USED) != 0 ? " used" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0 ? " first" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0 ? " guard" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0 ? " dead" : "");
		}

	private:
		void*		fPageBase;
		uint32		fFlags;
};


class Free : public GuardedHeapTraceEntry {
	public:
		Free(guarded_heap* heap, void* pageBase)
			:
			GuardedHeapTraceEntry(heap),
			fPageBase(pageBase)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("guarded heap free: heap: %p; page: %p", fHeap,
				fPageBase);
		}

	private:
		void*		fPageBase;
};


}	// namespace GuardedHeapTracing

#	define T(x)	new(std::nothrow) GuardedHeapTracing::x
#else
#	define T(x)
#endif	// GUARDED_HEAP_TRACING


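// Changes the protection of a single heap page. Skipped for the initial
// boot-time heap, which is not backed by a kernel area yet (area < 0).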
static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	if (area.area < 0)
		return;

	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
		B_PAGE_SIZE, protection);
}

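// Marks the pages of a new allocation as used: the first page records the
// allocation metadata (size, base, alignment, team, thread and optionally a
// stack trace), all pages are removed from the free list, and the last page
// becomes an inaccessible guard page.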
static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.team = (gKernelStartup ? 0 : team_get_current_team_id());
			page.thread = find_thread(NULL);
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = arch_debug_get_stack_trace(
				page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
				STACK_TRACE_KERNEL);
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			firstPage = &page;
		} else {
			page.team = firstPage->team;
			page.thread = firstPage->thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = 0;
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}

		T(Allocate(area.heap,
			(void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
			page.flags));
	}
}

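// Returns a page to the free list and revokes all access to it. With
// DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE set the page is instead marked
// dead (unless forced), so it will never be handed out again.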
static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

#if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	if (force || area.area < 0)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
#else
	page.flags = 0;
#endif

	page.allocation_size = 0;
	page.team = (gKernelStartup ? 0 : team_get_current_team_id());
	page.thread = find_thread(NULL);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
		GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 3, STACK_TRACE_KERNEL);
#endif

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);

	T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
}

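// Accounts for newly allocated pages in the heap-wide counter and returns
// whether free pages are running low, i.e. whether another area should be
// added.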
static bool
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	return (atomic_add((int32*)&heap.used_pages, pagesAllocated)
			+ pagesAllocated)
		>= heap.page_count - HEAP_GROW_SIZE / B_PAGE_SIZE / 2;
}

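// Allocates from a single area: walks the free list oldest entry first and
// takes the first page that starts a long enough run of free pages (the run
// includes the trailing guard page). Sets "grow" when the heap is running
// low and should get another area.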
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
	size_t alignment, uint32 flags, bool& grow)
{
	if (alignment > B_PAGE_SIZE) {
		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
		return NULL;
	}

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	if (pagesNeeded > area.page_count)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_first_item(&area.free_list);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		if (alignment == 0)
			alignment = 1;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}

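// Initializes a guarded heap area in place: the area header and the page
// bookkeeping table live at the start of the given memory, the remaining
// pages form the allocatable pool and are put on the free list, and the
// area is linked into the heap's area list.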
static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size, uint32 flags)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
			&area->protection_cookie) != B_OK) {
		return false;
	}

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	WriteLocker areaListWriteLocker(heap.lock);
	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}

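// Creates a new heap area, halving the requested size from HEAP_GROW_SIZE
// down to 1 MB until area creation succeeds.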
static bool
guarded_heap_area_create(guarded_heap& heap, uint32 flags)
{
	for (size_t trySize = HEAP_GROW_SIZE; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_KERNEL_ADDRESS, trySize, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize, flags))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}

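// Adds a new area to the heap unless the allocation flags forbid waiting for
// memory or locking kernel space. The creation counter makes sure that of
// several concurrent callers only one actually creates an area.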
static bool
guarded_heap_add_area(guarded_heap& heap, int32 counter, uint32 flags)
{
	if ((flags & (HEAP_DONT_LOCK_KERNEL_SPACE | HEAP_DONT_WAIT_FOR_MEMORY))
			!= 0) {
		return false;
	}

	if (atomic_test_and_set(&heap.area_creation_counter,
			counter + 1, counter) == counter) {
		return guarded_heap_area_create(heap, flags);
	}

	return false;
}

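// Tries to satisfy the allocation from the existing areas and, if none of
// them has room (or the heap is running low), adds a new area and retries.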
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
	uint32 flags)
{
	bool grow = false;
	void* result = NULL;
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, size, alignment, flags,
			grow);
		if (result != NULL)
			break;
	}

	int32 counter = atomic_get(&heap.area_creation_counter);
	areaListReadLocker.Unlock();

	if (result == NULL || grow) {
		bool added = guarded_heap_add_area(heap, counter, flags);
		if (result == NULL && added)
			return guarded_heap_allocate(heap, size, alignment, flags);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}

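// Finds the area containing the given address and returns it with its mutex
// held; panics and returns NULL if the address belongs to no area.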
static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	panic("guarded heap area for address %p not found", address);
	return NULL;
}

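// Translates an allocation address into its page index and validates that it
// really points at the first page of a live allocation. On failure it panics
// and returns area.page_count as an out-of-range index.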
static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}

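// Frees every page of the allocation that starts at the given address,
// including its trailing guard page.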
static void
guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

#if !DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	area.used_pages -= pagesFreed;
	atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
#endif
}


static void
guarded_heap_free(void* address, uint32 flags)
{
	if (address == NULL)
		return;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return;

	MutexLocker locker(area->lock, true);
	guarded_heap_area_free(*area, address, flags);
}

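// Reallocation always moves the data to a freshly allocated block and frees
// the old one, unless the requested size matches the current one exactly.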
static void*
guarded_heap_realloc(void* address, size_t newSize, uint32 flags)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return NULL;

	MutexLocker locker(area->lock, true);

	size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
	if (pageIndex >= area->page_count)
		return NULL;

	guarded_heap_page& page = area->pages[pageIndex];
	size_t oldSize = page.allocation_size;
	locker.Unlock();

	if (oldSize == newSize)
		return address;

	void* newBlock = malloc_etc(newSize, flags);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	free_etc(address, flags);

	return newBlock;
}


// #pragma mark - Debugger commands


static void
dump_guarded_heap_stack_trace(guarded_heap_page& page)
{
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	kprintf("stack trace:\n");
	for (size_t i = 0; i < page.stack_trace_depth; i++) {
		addr_t address = page.stack_trace[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("  %p\n", (void*)address);
	}
#endif
}


static int
dump_guarded_heap_page(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if (address < candidate->base)
			continue;
		if (address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 1;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];

	kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
	kprintf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		kprintf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		kprintf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		kprintf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		kprintf(" dead");
	kprintf("\n");

	kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	kprintf("allocation base: %p\n", page.allocation_base);
	kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	kprintf("allocating team: %" B_PRId32 "\n", page.team);
	kprintf("allocating thread: %" B_PRId32 "\n", page.thread);

	dump_guarded_heap_stack_trace(page);
	return 0;
}


static int
dump_guarded_heap_area(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if ((addr_t)candidate != address) {
			if (address < candidate->base)
				continue;
			if (address >= candidate->base + candidate->size)
				continue;
		}

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 1;
	}

	kprintf("guarded heap area: %p\n", area);
	kprintf("next heap area: %p\n", area->next);
	kprintf("guarded heap: %p\n", area->heap);
	kprintf("area id: %" B_PRId32 "\n", area->area);
	kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
	kprintf("size: %" B_PRIuSIZE "\n", area->size);
	kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
	kprintf("protection cookie: %p\n", area->protection_cookie);
	kprintf("lock: %p\n", &area->lock);

	size_t freeCount = 0;
	void* item = list_get_first_item(&area->free_list);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			kprintf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area->free_list, item);
	}

	kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area->page_count; i++) {
		guarded_heap_page& page = area->pages[i];
		if (i == area->page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area->page_count - i; j++) {
			if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	kprintf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	kprintf("pages: %p\n", area->pages);

	return 0;
}


static int
dump_guarded_heap(int argc, char** argv)
{
	guarded_heap* heap = &sGuardedHeap;
	if (argc != 1) {
		if (argc == 2)
			heap = (guarded_heap*)parse_expression(argv[1]);
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	kprintf("guarded heap: %p\n", heap);
	kprintf("rw lock: %p\n", &heap->lock);
	kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", heap->used_pages);
	kprintf("area creation counter: %" B_PRId32 "\n",
		heap->area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap->areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	kprintf("areas: %p (%" B_PRIuSIZE ")\n", heap->areas, areaCount);

	return 0;
}


static int
dump_guarded_heap_allocations(int argc, char** argv)
{
	team_id team = -1;
	thread_id thread = -1;
	addr_t address = 0;
	bool statsOnly = false;
	bool stackTrace = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "team") == 0)
			team = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "thread") == 0)
			thread = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "address") == 0)
			address = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "stats") == 0)
			statsOnly = true;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
		else if (strcmp(argv[i], "trace") == 0)
			stackTrace = true;
#endif
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	size_t totalSize = 0;
	uint32 totalCount = 0;

	guarded_heap_area* area = sGuardedHeap.areas;
	while (area != NULL) {
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
				continue;

			if ((team < 0 || page.team == team)
				&& (thread < 0 || page.thread == thread)
				&& (address == 0 || (addr_t)page.allocation_base == address)) {

				if (!statsOnly) {
					kprintf("team: % 6" B_PRId32 "; thread: % 6" B_PRId32 "; "
						"address: 0x%08" B_PRIxADDR "; size: %" B_PRIuSIZE
						" bytes\n", page.team, page.thread,
						(addr_t)page.allocation_base, page.allocation_size);

					if (stackTrace)
						dump_guarded_heap_stack_trace(page);
				}

				totalSize += page.allocation_size;
				totalCount++;
			}
		}

		area = area->next;
	}

	kprintf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE
		"\n", totalCount, totalSize);
	return 0;
}


// #pragma mark - Malloc API


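// Sets up the initial guarded heap from the given memory range. No kernel
// area backs this memory yet, hence the area id of -1; the real area is
// hooked up later in heap_init_post_sem().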
status_t
heap_init(addr_t address, size_t size)
{
	return guarded_heap_area_init(sGuardedHeap, -1, (void*)address, size, 0)
		? B_OK : B_ERROR;
}


status_t
heap_init_post_area()
{
	return B_OK;
}

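// Now that the VM is fully up, look up the kernel area backing the initial
// heap, prepare it for debug protection and apply the proper protection to
// every page. Also registers the kernel debugger commands.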
status_t
heap_init_post_sem()
{
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		if (area->area >= 0)
			continue;

		area_id id = area_for((void*)area->base);
		if (id < 0 || vm_prepare_kernel_area_debug_protection(id,
				&area->protection_cookie) != B_OK) {
			panic("failed to prepare initial guarded heap for protection");
			continue;
		}

		area->area = id;
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
				guarded_heap_page_protect(*area, i,
					B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
			} else
				guarded_heap_page_protect(*area, i, 0);
		}
	}

	add_debugger_command("guarded_heap", &dump_guarded_heap,
		"Dump info about the guarded heap");
	add_debugger_command_etc("guarded_heap_area", &dump_guarded_heap_area,
		"Dump info about a guarded heap area",
		"<address>\nDump info about guarded heap area containing address.\n",
		0);
	add_debugger_command_etc("guarded_heap_page", &dump_guarded_heap_page,
		"Dump info about a guarded heap page",
		"<address>\nDump info about guarded heap page containing address.\n",
		0);
	add_debugger_command_etc("allocations", &dump_guarded_heap_allocations,
		"Dump current heap allocations",
#if GUARDED_HEAP_STACK_TRACE_DEPTH == 0
		"[\"stats\"] [team] [thread] [address]\n"
#else
		"[\"stats\"|\"trace\"] [team] [thread] [address]\n"
#endif
		"If no parameters are given, all current allocations are dumped.\n"
		"If the optional argument \"stats\" is specified, only the allocation\n"
		"counts and no individual allocations are printed.\n"
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
		"If the optional argument \"trace\" is specified, a stack trace for\n"
		"each allocation is printed.\n"
#endif
		"If a specific allocation address is given, only this allocation is\n"
		"dumped.\n"
		"If a team and/or thread is specified, only allocations of this\n"
		"team/thread are dumped.\n", 0);

	return B_OK;
}


void*
memalign(size_t alignment, size_t size)
{
	return memalign_etc(alignment, size, 0);
}


void *
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
}


void
free_etc(void *address, uint32 flags)
{
	guarded_heap_free(address, flags);
}


void*
malloc(size_t size)
{
	return memalign_etc(0, size, 0);
}


void
free(void* address)
{
	free_etc(address, 0);
}


void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
	if (newSize == 0) {
		free_etc(address, flags);
		return NULL;
	}

	if (address == NULL)
		return malloc_etc(newSize, flags);

	return guarded_heap_realloc(address, newSize, flags);
}


void*
realloc(void* address, size_t newSize)
{
	return realloc_etc(address, newSize, 0);
}


#if USE_GUARDED_HEAP_FOR_OBJECT_CACHE


// #pragma mark - Slab API

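// With USE_GUARDED_HEAP_FOR_OBJECT_CACHE the slab API collapses into thin
// wrappers: an object_cache pointer merely encodes the object size, and
// every object allocation is routed through the guarded heap.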

void
request_memory_manager_maintenance()
{
}


object_cache*
create_object_cache(const char*, size_t objectSize, size_t, void*,
	object_cache_constructor, object_cache_destructor)
{
	return (object_cache*)objectSize;
}


object_cache*
create_object_cache_etc(const char*, size_t objectSize, size_t, size_t, size_t,
	size_t, uint32, void*, object_cache_constructor, object_cache_destructor,
	object_cache_reclaimer)
{
	return (object_cache*)objectSize;
}


void
delete_object_cache(object_cache* cache)
{
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	return memalign_etc(0, (size_t)cache, flags);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	return free_etc(object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	return B_OK;
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	*_allocatedMemory = 0;
}


void
slab_init(kernel_args* args)
{
}


void
slab_init_post_area()
{
}


void
slab_init_post_sem()
{
}


void
slab_init_post_thread()
{
}


#endif	// USE_GUARDED_HEAP_FOR_OBJECT_CACHE


#endif	// USE_GUARDED_HEAP_FOR_MALLOC