/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include <stdio.h>
#include <string.h>

#include <arch/debug.h>
#include <debug.h>
#include <elf.h>
#include <heap.h>
#include <malloc.h>
#include <slab/Slab.h>
#include <tracing.h>
#include <util/list.h>
#include <util/AutoLock.h>
#include <vm/vm.h>


#if USE_GUARDED_HEAP_FOR_MALLOC


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08

#define GUARDED_HEAP_STACK_TRACE_DEPTH	0


struct guarded_heap;

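// Per-page bookkeeping entry. One of these exists for every usable page of an
// area and lives in the metadata pages at the start of that area. The first
// page of an allocation records the allocation size, base, alignment, the
// thread that allocated (or last freed) it and optionally a stack trace.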
struct guarded_heap_page {
	uint8				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	thread_id			thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	size_t				stack_trace_depth;
	addr_t				stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
#endif
	list_link			free_list_link;
};

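// Describes one kernel area backing the guarded heap. The metadata (this
// header plus the page table in pages[]) occupies the first pages of the
// area; base, size and page_count refer to the remaining, usable pages.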
struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	void*				protection_cookie;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

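// Global heap state: the list of backing areas, heap wide page counters and a
// counter used to serialize the creation of new areas.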
struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	vint32				area_creation_counter;
	guarded_heap_area*	areas;
};


static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, NULL
};


#if GUARDED_HEAP_TRACING

namespace GuardedHeapTracing {


class GuardedHeapTraceEntry
	: public TRACE_ENTRY_SELECTOR(GUARDED_HEAP_TRACING_STACK_TRACE) {
	public:
		GuardedHeapTraceEntry(guarded_heap* heap)
			:
			TraceEntryBase(GUARDED_HEAP_TRACING_STACK_TRACE, 0, true),
			fHeap(heap)
		{
		}

	protected:
		guarded_heap*	fHeap;
};


class Allocate : public GuardedHeapTraceEntry {
	public:
		Allocate(guarded_heap* heap, void* pageBase, uint32 flags)
			:
			GuardedHeapTraceEntry(heap),
			fPageBase(pageBase),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("guarded heap allocate: heap: %p; page: %p; "
				"flags:%s%s%s%s", fHeap, fPageBase,
				(fFlags & GUARDED_HEAP_PAGE_FLAG_USED) != 0 ? " used" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0 ? " first" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0 ? " guard" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0 ? " dead" : "");
		}

	private:
		void*		fPageBase;
		uint32		fFlags;
};


class Free : public GuardedHeapTraceEntry {
	public:
		Free(guarded_heap* heap, void* pageBase)
			:
			GuardedHeapTraceEntry(heap),
			fPageBase(pageBase)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("guarded heap free: heap: %p; page: %p", fHeap,
				fPageBase);
		}

	private:
		void*		fPageBase;
};


}	// namespace GuardedHeapTracing

#	define T(x)	new(std::nothrow) GuardedHeapTracing::x
#else
#	define T(x)
#endif	// GUARDED_HEAP_TRACING


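// Set the memory protection of a single page of the given area. Does nothing
// as long as the area hasn't been registered yet (area < 0), i.e. before
// heap_init_post_sem() has prepared the debug protection cookie.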
static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	if (area.area < 0)
		return;

	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
		B_PAGE_SIZE, protection);
}


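// Mark the pages [startPageIndex, startPageIndex + pagesNeeded) as used. The
// first page records the allocation info, the last one becomes the
// inaccessible guard page; all of them are removed from the free list.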
static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.thread = find_thread(NULL);
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = arch_debug_get_stack_trace(
				page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
				STACK_TRACE_KERNEL);
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			firstPage = &page;
		} else {
			page.thread = firstPage->thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = 0;
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}

		T(Allocate(area.heap,
			(void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
			page.flags));
	}
}


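// Return a single page to the free list and revoke all access to it, so that
// any use after free faults immediately. With
// DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE the page is marked dead instead of
// becoming reusable (unless forced, e.g. during area initialization).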
static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

#if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	if (force || area.area < 0)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
#else
	page.flags = 0;
#endif

	page.allocation_size = 0;
	page.thread = find_thread(NULL);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
		GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 3, STACK_TRACE_KERNEL);
#endif

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);

	T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
}


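// Account for newly allocated pages in the heap wide counter. Returns true
// once usage gets close to the capacity, signalling that another area should
// be added.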
static bool
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	return (atomic_add((vint32*)&heap.used_pages, pagesAllocated)
			+ pagesAllocated)
		>= heap.page_count - HEAP_GROW_SIZE / B_PAGE_SIZE / 2;
}


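// Try to satisfy an allocation from a single area. The allocation is placed
// so that it ends right at the following guard page, which makes overruns
// fault immediately.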
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
	size_t alignment, uint32 flags, bool& grow)
{
	if (alignment > B_PAGE_SIZE) {
		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
		return NULL;
	}

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	if (pagesNeeded > area.page_count)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_first_item(&area.free_list);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		if (alignment == 0)
			alignment = 1;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}


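// Set up the metadata of a new (or the initial, pre-area) heap region and
// link it into the heap's area list. The pages needed for the header and the
// page table are subtracted from the usable page count.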
static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size, uint32 flags)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
			&area->protection_cookie) != B_OK) {
		return false;
	}

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	WriteLocker areaListWriteLocker(heap.lock);
	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}


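// Create a new backing area, halving the requested size down to 1 MB until
// creation succeeds.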
static bool
guarded_heap_area_create(guarded_heap& heap, uint32 flags)
{
	for (size_t trySize = HEAP_GROW_SIZE; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_KERNEL_ADDRESS, trySize, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize, flags))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}


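// Add another area to the heap. The area_creation_counter acts as a simple
// compare-and-swap guard so that concurrent allocations don't all try to grow
// the heap at the same time.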
static bool
guarded_heap_add_area(guarded_heap& heap, int32 counter, uint32 flags)
{
	if ((flags & (HEAP_DONT_LOCK_KERNEL_SPACE | HEAP_DONT_WAIT_FOR_MEMORY))
			!= 0) {
		return false;
	}

	if (atomic_test_and_set((vint32*)&heap.area_creation_counter,
			counter + 1, counter) == counter) {
		return guarded_heap_area_create(heap, flags);
	}

	return false;
}


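// Allocate from any existing area; if that fails, or the heap is getting
// full, try to add a new area and retry.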
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
	uint32 flags)
{
	bool grow = false;
	void* result = NULL;
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, size, alignment, flags,
			grow);
		if (result != NULL)
			break;
	}

	int32 counter = atomic_get(&heap.area_creation_counter);
	areaListReadLocker.Unlock();

	if (result == NULL || grow) {
		bool added = guarded_heap_add_area(heap, counter, flags);
		if (result == NULL && added)
			return guarded_heap_allocate(heap, size, alignment, flags);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}


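// Find the area containing the given address and return it with its mutex
// held. Panics if the address doesn't belong to the guarded heap.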
static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	panic("guarded heap area for address %p not found", address);
	return NULL;
}


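// Translate an address into its page index and validate that it points at the
// first page of a live allocation. Returns area.page_count on failure.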
static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}


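// Free all pages of the allocation starting at the given address, including
// its trailing guard page.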
static void
guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

#if !DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	area.used_pages -= pagesFreed;
	atomic_add((vint32*)&area.heap->used_pages, -pagesFreed);
#endif
}


static void
guarded_heap_free(void* address, uint32 flags)
{
	if (address == NULL)
		return;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return;

	MutexLocker locker(area->lock, true);
	guarded_heap_area_free(*area, address, flags);
}


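// Since allocations are page granular anyway, realloc simply allocates a new
// block and copies; only an exact size match reuses the old block.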
static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return NULL;

	MutexLocker locker(area->lock, true);

	size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
	if (pageIndex >= area->page_count)
		return NULL;

	guarded_heap_page& page = area->pages[pageIndex];
	size_t oldSize = page.allocation_size;
	locker.Unlock();

	if (oldSize == newSize)
		return address;

	void* newBlock = memalign(0, newSize);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	free(address);

	return newBlock;
}


// #pragma mark - Debugger commands


static int
dump_guarded_heap_page(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if (address < candidate->base)
			continue;
		if (address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 1;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];

	kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
	kprintf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		kprintf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		kprintf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		kprintf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		kprintf(" dead");
	kprintf("\n");

	kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	kprintf("allocation base: %p\n", page.allocation_base);
	kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	kprintf("allocating thread: %" B_PRId32 "\n", page.thread);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	kprintf("stack trace:\n");
	for (size_t i = 0; i < page.stack_trace_depth; i++) {
		addr_t address = page.stack_trace[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("  %p\n", (void*)address);
	}
#endif

	return 0;
}


static int
dump_guarded_heap_area(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if ((addr_t)candidate != address) {
			if (address < candidate->base)
				continue;
			if (address >= candidate->base + candidate->size)
				continue;
		}

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 1;
	}

	kprintf("guarded heap area: %p\n", area);
	kprintf("next heap area: %p\n", area->next);
	kprintf("guarded heap: %p\n", area->heap);
	kprintf("area id: %" B_PRId32 "\n", area->area);
	kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
	kprintf("size: %" B_PRIuSIZE "\n", area->size);
	kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
	kprintf("protection cookie: %p\n", area->protection_cookie);
	kprintf("lock: %p\n", &area->lock);

	size_t freeCount = 0;
	void* item = list_get_first_item(&area->free_list);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			kprintf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area->free_list, item);
	}

	kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area->page_count; i++) {
		guarded_heap_page& page = area->pages[i];
		if (i == area->page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area->page_count - i; j++) {
			if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	kprintf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	kprintf("pages: %p\n", area->pages);

	return 0;
}


static int
dump_guarded_heap(int argc, char** argv)
{
	guarded_heap* heap = &sGuardedHeap;
	if (argc != 1) {
		if (argc == 2)
			heap = (guarded_heap*)parse_expression(argv[1]);
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	kprintf("guarded heap: %p\n", heap);
	kprintf("rw lock: %p\n", &heap->lock);
	kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", heap->used_pages);
	kprintf("area creation counter: %" B_PRId32 "\n",
		(int32)heap->area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap->areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	kprintf("areas: %p (%" B_PRIuSIZE ")\n", heap->areas, areaCount);

	return 0;
}


// #pragma mark - Malloc API


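// Called early with a bootstrap memory range before any areas exist; the
// region is hooked up to a real area later in heap_init_post_sem().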
status_t
heap_init(addr_t address, size_t size)
{
	return guarded_heap_area_init(sGuardedHeap, -1, (void*)address, size, 0)
		? B_OK : B_ERROR;
}


status_t
heap_init_post_area()
{
	return B_OK;
}


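// Now that areas exist, look up the area backing the initial heap, prepare it
// for debug protection and apply the protection matching the current page
// states. Also registers the KDL commands.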
status_t
heap_init_post_sem()
{
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		if (area->area >= 0)
			continue;

		area_id id = area_for((void*)area->base);
		if (id < 0 || vm_prepare_kernel_area_debug_protection(id,
				&area->protection_cookie) != B_OK) {
			panic("failed to prepare initial guarded heap for protection");
			continue;
		}

		area->area = id;
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
				guarded_heap_page_protect(*area, i,
					B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
			} else
				guarded_heap_page_protect(*area, i, 0);
		}
	}

	add_debugger_command("guarded_heap", &dump_guarded_heap,
		"Dump info about the guarded heap");
	add_debugger_command_etc("guarded_heap_area", &dump_guarded_heap_area,
		"Dump info about a guarded heap area",
		"<address>\nDump info about guarded heap area containing address.\n",
		0);
	add_debugger_command_etc("guarded_heap_page", &dump_guarded_heap_page,
		"Dump info about a guarded heap page",
		"<address>\nDump info about guarded heap page containing address.\n",
		0);

	return B_OK;
}


void*
memalign(size_t alignment, size_t size)
{
	return memalign_etc(alignment, size, 0);
}


void *
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
}


void
free_etc(void *address, uint32 flags)
{
	guarded_heap_free(address, flags);
}


void*
malloc(size_t size)
{
	return memalign_etc(0, size, 0);
}


void
free(void* address)
{
	free_etc(address, 0);
}


void*
realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		free(address);
		return NULL;
	}

	if (address == NULL)
		return memalign(0, newSize);

	return guarded_heap_realloc(address, newSize);
}


#if USE_GUARDED_HEAP_FOR_OBJECT_CACHE


// #pragma mark - Slab API


void
request_memory_manager_maintenance()
{
}


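// The object cache stubs below simply encode the object size in the returned
// object_cache pointer and forward all allocations to the guarded heap
// malloc.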
object_cache*
create_object_cache(const char*, size_t objectSize, size_t, void*,
	object_cache_constructor, object_cache_destructor)
{
	return (object_cache*)objectSize;
}


object_cache*
create_object_cache_etc(const char*, size_t objectSize, size_t, size_t, size_t,
	size_t, uint32, void*, object_cache_constructor, object_cache_destructor,
	object_cache_reclaimer)
{
	return (object_cache*)objectSize;
}


void
delete_object_cache(object_cache* cache)
{
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	return memalign_etc(0, (size_t)cache, flags);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	return free_etc(object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	return B_OK;
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	*_allocatedMemory = 0;
}


void
slab_init(kernel_args* args)
{
}


void
slab_init_post_area()
{
}


void
slab_init_post_sem()
{
}


void
slab_init_post_thread()
{
}


#endif	// USE_GUARDED_HEAP_FOR_OBJECT_CACHE


#endif	// USE_GUARDED_HEAP_FOR_MALLOC