/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */


#include "MemoryManager.h"

#include <algorithm>

#include <debug.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>

#include "kernel_debug_config.h"

#include "ObjectCache.h"


//#define TRACE_MEMORY_MANAGER
#ifdef TRACE_MEMORY_MANAGER
#	define TRACE(x...)	dprintf(x)
#else
#	define TRACE(x...)	do {} while (false)
#endif

#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
#	define PARANOID_CHECKS_ONLY(x)	x
#else
#	define PARANOID_CHECKS_ONLY(x)
#endif


static const char* const kSlabAreaName = "slab area";

static void* sAreaTableBuffer[1024];

mutex MemoryManager::sLock;
rw_lock MemoryManager::sAreaTableLock;
kernel_args* MemoryManager::sKernelArgs;
MemoryManager::AreaTable MemoryManager::sAreaTable;
MemoryManager::Area* MemoryManager::sFreeAreas;
int MemoryManager::sFreeAreaCount;
MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
bool MemoryManager::sMaintenanceNeeded;


RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)


// #pragma mark - kernel tracing


#if SLAB_MEMORY_MANAGER_TRACING


//namespace SlabMemoryManagerCacheTracing {
struct MemoryManager::Tracing {

class MemoryManagerTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
public:
	MemoryManagerTraceEntry()
		:
		TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
	{
	}
};


class Allocate : public MemoryManagerTraceEntry {
public:
	Allocate(ObjectCache* cache, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fCache(cache),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
			fCache, fFlags);
	}

private:
	ObjectCache*	fCache;
	uint32			fFlags;
};


class Free : public MemoryManagerTraceEntry {
public:
	Free(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
			fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateRaw : public MemoryManagerTraceEntry {
public:
	AllocateRaw(size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
			", flags: %#" B_PRIx32, fSize, fFlags);
	}

private:
	size_t	fSize;
	uint32	fFlags;
};


class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
public:
	FreeRawOrReturnCache(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
			B_PRIx32, fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateArea : public MemoryManagerTraceEntry {
public:
	AllocateArea(Area* area, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
			" -> %p", fFlags, fArea);
	}

private:
	Area*	fArea;
	uint32	fFlags;
};


class AddArea : public MemoryManagerTraceEntry {
public:
	AddArea(Area* area)
		:
		MemoryManagerTraceEntry(),
		fArea(area)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager add area: %p", fArea);
	}

private:
	Area*	fArea;
};


class FreeArea : public MemoryManagerTraceEntry {
public:
	FreeArea(Area* area, bool areaRemoved, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags),
		fRemoved(areaRemoved)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
			fArea, fRemoved ? " (removed)" : "", fFlags);
	}

private:
	Area*	fArea;
	uint32	fFlags;
	bool	fRemoved;
};


class AllocateMetaChunk : public MemoryManagerTraceEntry {
public:
	AllocateMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class FreeMetaChunk : public MemoryManagerTraceEntry {
public:
	FreeMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class AllocateChunk : public MemoryManagerTraceEntry {
public:
	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fChunkSize(chunkSize),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
			fMetaChunk, fChunk);
	}

private:
	size_t	fChunkSize;
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class AllocateChunks : public MemoryManagerTraceEntry {
public:
	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
		Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunkSize(chunkSize),
		fChunkCount(chunkCount),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	size_t	fChunkSize;
	uint32	fChunkCount;
	uint32	fChunk;
};


class FreeChunk : public MemoryManagerTraceEntry {
public:
	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class Map : public MemoryManagerTraceEntry {
public:
	Map(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


class Unmap : public MemoryManagerTraceEntry {
public:
	Unmap(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


//}	// namespace SlabMemoryManagerCacheTracing
};	// struct MemoryManager::Tracing


//#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
#	define T(x)	new(std::nothrow) MemoryManager::Tracing::x

#else
#	define T(x)
#endif	// SLAB_MEMORY_MANAGER_TRACING


// #pragma mark - MemoryManager


/*static*/ void
MemoryManager::Init(kernel_args* args)
{
	mutex_init(&sLock, "slab memory manager");
	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
	sKernelArgs = args;

	new(&sFreeCompleteMetaChunks) MetaChunkList;
	new(&sFreeShortMetaChunks) MetaChunkList;
	new(&sPartialMetaChunksSmall) MetaChunkList;
	new(&sPartialMetaChunksMedium) MetaChunkList;

	new(&sAreaTable) AreaTable;
	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
		// A bit hacky: The table now owns the memory. Since we never resize or
		// free it, that's not a problem, though.

	sFreeAreas = NULL;
	sFreeAreaCount = 0;
	sMaintenanceNeeded = false;
}


/*static*/ void
MemoryManager::InitPostArea()
{
	sKernelArgs = NULL;
	// Convert all areas to actual areas. This loop might look a bit weird, but
	// it is necessary, since creating the actual area involves memory
	// allocations, which in turn can change the set of areas we are iterating
	// over.
	bool done;
	do {
		done = true;

		for (AreaTable::Iterator it = sAreaTable.GetIterator();
				Area* area = it.Next();) {
			if (area->vmArea == NULL) {
				_ConvertEarlyArea(area);
				done = false;
				break;
			}
		}
	} while (!done);

	// unmap and free unused pages
	if (sFreeAreas != NULL) {
		// Just "leak" all but the first of the free areas -- the VM will
		// automatically free all unclaimed memory.
		sFreeAreas->next = NULL;
		sFreeAreaCount = 1;

		Area* area = sFreeAreas;
		_ConvertEarlyArea(area);
		_UnmapFreeChunksEarly(area);
	}

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		_UnmapFreeChunksEarly(area);
	}

	sMaintenanceNeeded = true;
		// might not be necessary, but does no harm

	add_debugger_command_etc("slab_area", &_DumpArea,
		"Dump information on a given slab area",
		"[ -c ] <area>\n"
		"Dump information on a given slab area specified by its base "
			"address.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_areas", &_DumpAreas,
		"List all slab areas",
		"\n"
		"Lists all slab areas.\n", 0);
	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
		"Dump information on a given slab meta chunk",
		"<meta chunk>\n"
		"Dump information on a given slab meta chunk specified by its base "
			"or object address.\n", 0);
	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
		"List all non-full slab meta chunks",
		"[ -c ]\n"
		"Lists all non-full slab meta chunks.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
		"List all raw allocations in slab areas",
		"\n"
		"Lists all raw allocations in slab areas.\n", 0);
}

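
/*!	Allocates a chunk of the cache's slab size for \a cache and maps it.
	On success the chunk's address is returned via \a _pages and the chunk's
	reference field is set to the cache, so the chunk can later be attributed
	to it.
*/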
/*static*/ status_t
MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
{
	// TODO: Support CACHE_UNLOCKED_PAGES!

	T(Allocate(cache, flags));

	size_t chunkSize = cache->slab_size;

	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
		B_PRIuSIZE "\n", cache, flags, chunkSize);

	MutexLocker locker(sLock);

	// allocate a chunk
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
	if (error != B_OK)
		return error;

	// map the chunk
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunk
		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)cache;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}

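
/*!	Frees a chunk previously allocated via Allocate(). Looks up the chunk's
	area and meta chunk from the address and returns the chunk to its meta
	chunk, unmapping it in the process.
*/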
/*static*/ void
MemoryManager::Free(void* pages, uint32 flags)
{
	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);

	T(Free(pages, flags));

	// get the area and the meta chunk
	Area* area = _AreaForAddress((addr_t)pages);
	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);

	// get the chunk
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	ASSERT(chunk->next != NULL);
	ASSERT(chunk->next < metaChunk->chunks
		|| chunk->next
			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);

	// and free it
	MutexLocker locker(sLock);
	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
}

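
/*!	Allocates memory for a raw (non object cache) allocation. Sizes greater
	than a large chunk, as well as size-aligned allocations, are served by a
	dedicated VM area; otherwise a run of small or medium chunks is allocated
	and the address of the last allocated byte is stored in the first chunk's
	reference field.
*/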
/*static*/ status_t
MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
{
#if SLAB_MEMORY_MANAGER_TRACING
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
	size += sizeof(AllocationTrackingInfo);
#else
	T(AllocateRaw(size, flags));
#endif
#endif

	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);

	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
		  flags);

	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
		// Requested size greater than a large chunk or an aligned allocation.
		// Allocate as an area.
		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
			return B_WOULD_BLOCK;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification
			= (flags & CACHE_ALIGN_ON_SIZE) != 0
				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		area_id area = create_area_etc(VMAddressSpace::KernelID(),
			"slab large raw allocation", size, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
					? CREATE_AREA_DONT_WAIT : 0)
				| CREATE_AREA_DONT_CLEAR, 0,
			&virtualRestrictions, &physicalRestrictions, &_pages);

		status_t result = area >= 0 ? B_OK : area;
		if (result == B_OK) {
			fill_allocated_block(_pages, size);
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
			_AddTrackingInfo(_pages, size, traceEntry);
#endif
		}

		return result;
	}

	// determine chunk size (small or medium)
	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;

	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
	}

	MutexLocker locker(sLock);

	// allocate the chunks
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
		chunk);
	if (error != B_OK)
		return error;

	// map the chunks
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunks
		for (uint32 i = 0; i < chunkCount; i++)
			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)chunkAddress + size - 1;
	_pages = (void*)chunkAddress;

	fill_allocated_block(_pages, size);
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	_AddTrackingInfo(_pages, size, traceEntry);
#endif

	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}

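
/*!	Frees a raw allocation done via AllocateRaw(), or, if \a pages belongs to
	an object cache chunk, merely returns that cache so the caller can free
	the memory through the cache. Returns \c NULL for raw allocations.
*/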
/*static*/ ObjectCache*
MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
{
	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
		flags);

	T(FreeRawOrReturnCache(pages, flags));

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		panic("cannot proceed without locking kernel space!");
		return NULL;
	}

	// get the area
	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		// Probably a large allocation. Look up the VM area.
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)pages);
		addressSpace->ReadUnlock();

		if (area != NULL && (addr_t)pages == area->Base())
			delete_area(area->id);
		else
			panic("freeing unknown block %p from area %p", pages, area);

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	// Seems we have a raw chunk allocation.
	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
	ASSERT(reference > (addr_t)pages);
	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
	size_t size = reference - (addr_t)pages + 1;
	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);

	// unmap the chunks
	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);

	// and free them
	MutexLocker locker(sLock);
	uint32 chunkCount = size / metaChunk->chunkSize;
	for (uint32 i = 0; i < chunkCount; i++)
		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);

	return NULL;
}


/*static*/ size_t
MemoryManager::AcceptableChunkSize(size_t size)
{
	if (size <= SLAB_CHUNK_SIZE_SMALL)
		return SLAB_CHUNK_SIZE_SMALL;
	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
		return SLAB_CHUNK_SIZE_MEDIUM;
	return SLAB_CHUNK_SIZE_LARGE;
}


/*static*/ ObjectCache*
MemoryManager::GetAllocationInfo(void* address, size_t& _size)
{
	// get the area
	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
	readLocker.Unlock();

	if (area == NULL) {
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)address);
		if (area != NULL && (addr_t)address == area->Base())
			_size = area->Size();
		else
			_size = 0;
		addressSpace->ReadUnlock();

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	if ((reference & 1) == 0) {
		ObjectCache* cache = (ObjectCache*)reference;
		_size = cache->object_size;
		return cache;
	}

	_size = reference - (addr_t)address + 1;
	return NULL;
}


/*static*/ ObjectCache*
MemoryManager::CacheForAddress(void* address)
{
	// get the area
	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
	readLocker.Unlock();

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
}


/*static*/ void
MemoryManager::PerformMaintenance()
{
	MutexLocker locker(sLock);

	while (sMaintenanceNeeded) {
		sMaintenanceNeeded = false;

		// We want to keep one or two areas as a reserve. This way we have at
		// least one area to use in situations when we aren't allowed to
		// allocate one, and we also avoid ping-pong effects.
		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
			return;

		if (sFreeAreaCount == 0) {
			// try to allocate one
			Area* area;
			if (_AllocateArea(0, area) != B_OK)
				return;

			_PushFreeArea(area);
			if (sFreeAreaCount > 2)
				sMaintenanceNeeded = true;
		} else {
			// free until we only have two free ones
			while (sFreeAreaCount > 2)
				_FreeArea(_PopFreeArea(), true, 0);

			if (sFreeAreaCount == 0)
				sMaintenanceNeeded = true;
		}
	}
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

/*static*/ bool
MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
{
	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;

				if (!callback.ProcessTrackingInfo(
						_TrackingInfoFor((void*)chunkAddress, size),
						(void*)chunkAddress, size)) {
					return false;
				}
			}
		}
	}

	return true;
}

#endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


/*static*/ ObjectCache*
MemoryManager::DebugObjectCacheForAddress(void* address)
{
	// get the area
	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
	Area* area = sAreaTable.Lookup(areaBase);

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	if (metaChunk->chunkSize == 0)
		return NULL;
	if ((addr_t)address < metaChunk->chunkBase)
		return NULL;

	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	return NULL;
}

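
/*!	Allocates \a chunkCount contiguous chunks of size \a chunkSize. If no
	suitable meta chunk is available, a free area is pulled in or a new area
	is allocated, possibly waiting for a concurrent area allocation to finish
	first. Called with sLock held.
*/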
/*static*/ status_t
MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunkList* metaChunkList = NULL;
	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
		metaChunkList = &sPartialMetaChunksSmall;
	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
		metaChunkList = &sPartialMetaChunksMedium;
	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
			B_PRIuSIZE, chunkSize);
		return B_BAD_VALUE;
	}

	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
		return B_OK;

	if (sFreeAreas != NULL) {
		_AddArea(_PopFreeArea());
		_RequestMaintenance();

		return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
			_chunk) ? B_OK : B_NO_MEMORY;
	}

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// We can't create an area under this restriction, and we must not
		// wait for someone else to do it either.
		return B_WOULD_BLOCK;
	}

	// We need to allocate a new area. Wait if someone else is already trying
	// to do the same.
	while (true) {
		AllocationEntry* allocationEntry = NULL;
		if (sAllocationEntryDontWait != NULL) {
			allocationEntry = sAllocationEntryDontWait;
		} else if (sAllocationEntryCanWait != NULL
				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
			allocationEntry = sAllocationEntryCanWait;
		} else
			break;

		allocationEntry->condition.Wait(&sLock);

		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
				_chunk)) {
			return B_OK;
		}
	}

	// prepare the allocation entry others can wait on
	AllocationEntry*& allocationEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? sAllocationEntryDontWait : sAllocationEntryCanWait;

	AllocationEntry myResizeEntry;
	allocationEntry = &myResizeEntry;
	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
	allocationEntry->thread = find_thread(NULL);

	Area* area;
	status_t error = _AllocateArea(flags, area);

	allocationEntry->condition.NotifyAll();
	allocationEntry = NULL;

	if (error != B_OK)
		return error;

	// Try again to get a meta chunk. Something might have been freed in the
	// meantime; in that case we can free the area we just allocated.
	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
		_FreeArea(area, true, flags);
		return B_OK;
	}

	_AddArea(area);
	return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
		_chunk) ? B_OK : B_NO_MEMORY;
}

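
/*!	Tries to satisfy a multi-chunk allocation from an existing partial or
	free meta chunk. The chunks are taken from the meta chunk's contiguous
	free range and removed from its free list. Returns \c false if no meta
	chunk with a large enough free range is available.
*/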
/*static*/ bool
MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	// the common and less complicated special case
	if (chunkCount == 1)
		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);

	ASSERT(metaChunkList != NULL);

	// Iterate through the partial meta chunk list and try to find a free
	// range that is large enough.
	MetaChunk* metaChunk = NULL;
	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
			(metaChunk = it.Next()) != NULL;) {
		if (metaChunk->firstFreeChunk + chunkCount - 1
				<= metaChunk->lastFreeChunk) {
			break;
		}
	}

	if (metaChunk == NULL) {
		// try to get a free meta chunk
		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
				/ chunkSize >= chunkCount) {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
		}
		if (metaChunk == NULL)
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();

		if (metaChunk == NULL)
			return false;

		metaChunkList->Add(metaChunk);
		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// pull the chunks out of the free list
	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
	Chunk* lastChunk = firstChunk + (chunkCount - 1);
	Chunk** chunkPointer = &metaChunk->freeChunks;
	uint32 remainingChunks = chunkCount;
	while (remainingChunks > 0) {
		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
			chunkCount, metaChunk->GetArea(),
			metaChunk - metaChunk->GetArea()->metaChunks);
		Chunk* chunk = *chunkPointer;
		if (chunk >= firstChunk && chunk <= lastChunk) {
			*chunkPointer = chunk->next;
			chunk->reference = 1;
			remainingChunks--;
		} else
			chunkPointer = &chunk->next;
	}

	// allocate the chunks
	metaChunk->usedChunkCount += chunkCount;
	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	// update the free range
	metaChunk->firstFreeChunk += chunkCount;

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));

	_chunk = firstChunk;
	_metaChunk = metaChunk;

	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));

	return true;
}

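
/*!	Allocates a single chunk from the given partial meta chunk list, falling
	back to a free (short or complete) meta chunk if the list is empty.
*/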
/*static*/ bool
MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
	MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunk* metaChunk = metaChunkList != NULL
		? metaChunkList->Head() : NULL;
	if (metaChunk == NULL) {
		// no partial meta chunk -- maybe there's a free one
		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
		} else {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
			if (metaChunk == NULL)
				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
			if (metaChunk != NULL)
				metaChunkList->Add(metaChunk);
		}

		if (metaChunk == NULL)
			return false;

		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// allocate the chunk
	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	_chunk = _pop(metaChunk->freeChunks);
	_metaChunk = metaChunk;

	_chunk->reference = 1;

	// update the free range
	uint32 chunkIndex = _chunk - metaChunk->chunks;
	if (chunkIndex >= metaChunk->firstFreeChunk
			&& chunkIndex <= metaChunk->lastFreeChunk) {
		if (chunkIndex - metaChunk->firstFreeChunk
				<= metaChunk->lastFreeChunk - chunkIndex) {
			metaChunk->firstFreeChunk = chunkIndex + 1;
		} else
			metaChunk->lastFreeChunk = chunkIndex - 1;
	}

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));

	T(AllocateChunk(chunkSize, metaChunk, _chunk));

	return true;
}

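
/*!	Returns \a chunk to its meta chunk, unmapping it first unless
	\a alreadyUnmapped. Meta chunks that become empty are moved back to the
	free meta chunk lists, and the area itself is freed once its last meta
	chunk becomes unused.
*/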
/*static*/ void
MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
{
	// unmap the chunk
	if (!alreadyUnmapped) {
		mutex_unlock(&sLock);
		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
		mutex_lock(&sLock);
	}

	T(FreeChunk(metaChunk, chunk));

	_push(metaChunk->freeChunks, chunk);

	uint32 chunkIndex = chunk - metaChunk->chunks;

	// free the meta chunk, if it is unused now
	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
	ASSERT(metaChunk->usedChunkCount > 0);
	if (--metaChunk->usedChunkCount == 0) {
		T(FreeMetaChunk(metaChunk));

		// remove from partial meta chunk list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Remove(metaChunk);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Remove(metaChunk);

		// mark empty
		metaChunk->chunkSize = 0;

		// add to free list
		if (metaChunk == area->metaChunks)
			sFreeShortMetaChunks.Add(metaChunk, false);
		else
			sFreeCompleteMetaChunks.Add(metaChunk, false);

		// free the area, if it is unused now
		ASSERT(area->usedMetaChunkCount > 0);
		if (--area->usedMetaChunkCount == 0) {
			_FreeArea(area, false, flags);
			PARANOID_CHECKS_ONLY(areaDeleted = true;)
		}
	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
		// the meta chunk was full before -- add it back to its partial chunk
		// list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Add(metaChunk, false);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Add(metaChunk, false);

		metaChunk->firstFreeChunk = chunkIndex;
		metaChunk->lastFreeChunk = chunkIndex;
	} else {
		// extend the free range, if the chunk adjoins
		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
			uint32 firstFree = chunkIndex;
			for (; firstFree > 0; firstFree--) {
				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
				if (!_IsChunkFree(metaChunk, previousChunk))
					break;
			}
			metaChunk->firstFreeChunk = firstFree;
		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
			uint32 lastFree = chunkIndex;
			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
				if (!_IsChunkFree(metaChunk, nextChunk))
					break;
			}
			metaChunk->lastFreeChunk = lastFree;
		}
	}

	PARANOID_CHECKS_ONLY(
		if (!areaDeleted)
			_CheckMetaChunk(metaChunk);
	)
}

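
/*!	Initializes \a metaChunk for chunks of size \a chunkSize: computes the
	chunk count, builds the free chunk list, and resets the free range. The
	area's first meta chunk is shorter, since it also holds the
	administrative data.
*/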
/*static*/ void
MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
{
	Area* area = metaChunk->GetArea();

	if (metaChunk == area->metaChunks) {
		// the first chunk is shorter
		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
			chunkSize);
		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
	}

	metaChunk->chunkSize = chunkSize;
	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
	metaChunk->usedChunkCount = 0;

	metaChunk->freeChunks = NULL;
	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
		_push(metaChunk->freeChunks, metaChunk->chunks + i);

	metaChunk->firstFreeChunk = 0;
	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
}


/*static*/ void
MemoryManager::_AddArea(Area* area)
{
	T(AddArea(area));

	// add the area to the hash table
	WriteLocker writeLocker(sAreaTableLock);
	sAreaTable.InsertUnchecked(area);
	writeLocker.Unlock();

	// add the area's meta chunks to the free lists
	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
}

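
/*!	Allocates a new slab area. After the VM is up this creates a null area
	and maps only the administrative part; during early boot raw memory is
	allocated instead. Temporarily drops sLock.
*/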
/*static*/ status_t
MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
{
	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);

	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);

	mutex_unlock(&sLock);

	size_t pagesNeededToMap = 0;
	void* areaBase;
	Area* area;
	VMArea* vmArea = NULL;

	if (sKernelArgs == NULL) {
		// create an area
		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
			? CREATE_AREA_PRIORITY_VIP : 0;
		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
			areaCreationFlags);
		if (areaID < 0) {
			mutex_lock(&sLock);
			return areaID;
		}

		area = _AreaForAddress((addr_t)areaBase);

		// map the memory for the administrative structure
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		VMTranslationMap* translationMap = addressSpace->TranslationMap();

		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);

		vmArea = VMAreas::Lookup(areaID);
		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
			pagesNeededToMap, flags);
		if (error != B_OK) {
			delete_area(areaID);
			mutex_lock(&sLock);
			return error;
		}

		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
			areaID);
	} else {
		// no areas yet -- allocate raw memory
		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			SLAB_AREA_SIZE);
		if (areaBase == NULL) {
			mutex_lock(&sLock);
			return B_NO_MEMORY;
		}
		area = _AreaForAddress((addr_t)areaBase);

		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
			area);
	}

	// init the area structure
	area->vmArea = vmArea;
	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
	area->usedMetaChunkCount = 0;
	area->fullyMapped = vmArea == NULL;

	// init the meta chunks
	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		metaChunk->chunkSize = 0;
		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
			// Note: chunkBase and totalSize aren't correct for the first
			// meta chunk. They will be set in _PrepareMetaChunk().
		metaChunk->chunkCount = 0;
		metaChunk->usedChunkCount = 0;
		metaChunk->freeChunks = NULL;
	}

	mutex_lock(&sLock);
	_area = area;

	T(AllocateArea(area, flags));

	return B_OK;
}

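
/*!	Frees \a area. Unless the area is needed for the free area reserve or
	deleting is currently not allowed, its VM area is deleted and the memory
	reserved for mapping is unreserved. Temporarily drops sLock when deleting.
*/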
/*static*/ void
MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
{
	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);

	T(FreeArea(area, areaRemoved, flags));

	ASSERT(area->usedMetaChunkCount == 0);

	if (!areaRemoved) {
		// remove the area's meta chunks from the free lists
		ASSERT(area->metaChunks[0].usedChunkCount == 0);
		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);

		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			ASSERT(area->metaChunks[i].usedChunkCount == 0);
			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
		}

		// remove the area from the hash table
		WriteLocker writeLocker(sAreaTableLock);
		sAreaTable.RemoveUnchecked(area);
		writeLocker.Unlock();
	}

	// We want to keep one or two free areas as a reserve.
	if (sFreeAreaCount <= 1) {
		_PushFreeArea(area);
		return;
	}

	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// This is either early in the boot process or we aren't allowed to
		// delete the area now.
		_PushFreeArea(area);
		_RequestMaintenance();
		return;
	}

	mutex_unlock(&sLock);

	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
		area->vmArea->id);

	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
	delete_area(area->vmArea->id);
	vm_unreserve_memory(memoryToUnreserve);

	mutex_lock(&sLock);
}

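
/*!	Maps the given address range within \a vmArea, reserving memory and pages
	as needed and wiring the newly allocated pages. A \c NULL \a vmArea means
	an early (fully mapped) area, in which case nothing needs to be done.
*/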
/*static*/ status_t
MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
	size_t reserveAdditionalMemory, uint32 flags)
{
	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	T(Map(address, size, flags));

	if (vmArea == NULL) {
		// everything is mapped anyway
		return B_OK;
	}

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();

	// reserve memory for the chunk
	int priority = (flags & CACHE_PRIORITY_VIP) != 0
		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
	size_t reservedMemory = size + reserveAdditionalMemory;
	status_t error = vm_try_reserve_memory(size, priority,
		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
	if (error != B_OK)
		return error;

	// reserve the pages we need now
	size_t reservedPages = size / B_PAGE_SIZE
		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
	vm_page_reservation reservation;
	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
			vm_unreserve_memory(reservedMemory);
			return B_WOULD_BLOCK;
		}
	} else
		vm_page_reserve_pages(&reservation, reservedPages, priority);

	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// map the pages
	translationMap->Lock();

	addr_t areaOffset = address - vmArea->Base();
	addr_t endAreaOffset = areaOffset + size;
	for (size_t offset = areaOffset; offset < endAreaOffset;
			offset += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
		cache->InsertPage(page, offset);

		page->IncrementWiredCount();
		atomic_add(&gMappedPagesCount, 1);
		DEBUG_PAGE_ACCESS_END(page);

		translationMap->Map(vmArea->Base() + offset,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			vmArea->MemoryType(), &reservation);
	}

	translationMap->Unlock();

	cache->ReleaseRefAndUnlock();

	vm_page_unreserve_pages(&reservation);

	return B_OK;
}

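
/*!	Unmaps the given address range within \a vmArea, frees the underlying
	pages, and unreserves the corresponding memory.
*/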
/*static*/ status_t
MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
	uint32 flags)
{
	T(Unmap(address, size, flags));

	if (vmArea == NULL)
		return B_ERROR;

	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// unmap the pages
	translationMap->Lock();
	translationMap->Unmap(address, address + size - 1);
	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
	translationMap->Unlock();

	// free the pages
	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
		areaPageOffset, true, true);
	while (vm_page* page = it.Next()) {
		if (page->cache_offset >= areaPageEndOffset)
			break;

		DEBUG_PAGE_ACCESS_START(page);

		page->DecrementWiredCount();

		cache->RemovePage(page);
			// the iterator is remove-safe
		vm_page_free(cache, page);
	}

	cache->ReleaseRefAndUnlock();

	vm_unreserve_memory(size);

	return B_OK;
}

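
/*!	Unmaps all unused ranges of a fully mapped early area: the space before
	the Area structure, free meta chunks, free chunks, and the gap between
	the administrative data and the first meta chunk's first chunk.
*/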
/*static*/ void
MemoryManager::_UnmapFreeChunksEarly(Area* area)
{
	if (!area->fullyMapped)
		return;

	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);

	// unmap the space before the Area structure
	#if SLAB_AREA_STRUCT_OFFSET > 0
		_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
			0);
	#endif

	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		if (metaChunk->chunkSize == 0) {
			// meta chunk is free -- unmap it completely
			if (i == 0) {
				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
			} else {
				_UnmapChunk(area->vmArea,
					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
					SLAB_CHUNK_SIZE_LARGE, 0);
			}
		} else {
			// unmap free chunks
			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
					chunk = chunk->next) {
				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
					metaChunk->chunkSize, 0);
			}

			// The first meta chunk might have space before its first chunk.
			if (i == 0) {
				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
				if (unusedStart < metaChunk->chunkBase) {
					_UnmapChunk(area->vmArea, unusedStart,
						metaChunk->chunkBase - unusedStart, 0);
				}
			}
		}
	}

	area->fullyMapped = false;
}

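
/*!	Creates a proper VM area for an area that was allocated during early
	boot, using the already wired pages.
*/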
/*static*/ void
MemoryManager::_ConvertEarlyArea(Area* area)
{
	void* address = (void*)area->BaseAddress();
	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
		SLAB_AREA_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (areaID < 0)
		panic("out of memory");

	area->vmArea = VMAreas::Lookup(areaID);
}


/*static*/ void
MemoryManager::_RequestMaintenance()
{
	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
		return;

	sMaintenanceNeeded = true;
	request_memory_manager_maintenance();
}


/*static*/ bool
MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
	const Chunk* chunk)
{
	Chunk* freeChunk = metaChunk->freeChunks;
	while (freeChunk != NULL) {
		if (freeChunk == chunk)
			return true;
		freeChunk = freeChunk->next;
	}

	return false;
}


#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS

/*static*/ void
MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
{
	Area* area = metaChunk->GetArea();
	int32 metaChunkIndex = metaChunk - area->metaChunks;
	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
		panic("invalid meta chunk %p!", metaChunk);
		return;
	}

	switch (metaChunk->chunkSize) {
		case 0:
			// unused
			return;
		case SLAB_CHUNK_SIZE_SMALL:
		case SLAB_CHUNK_SIZE_MEDIUM:
		case SLAB_CHUNK_SIZE_LARGE:
			break;
		default:
			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
				metaChunk, metaChunk->chunkSize);
			return;
	}

	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
			metaChunk, metaChunk->totalSize);
		return;
	}

	addr_t expectedBase = area->BaseAddress()
		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
	if (metaChunk->chunkBase < expectedBase
		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
			> SLAB_CHUNK_SIZE_LARGE) {
		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
			metaChunk->chunkBase);
		return;
	}

	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
			metaChunk->chunkCount);
		return;
	}

	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1689		panic("meta chunk %p has invalid unused chunk count: %u", metaChunk,
1690			metaChunk->usedChunkCount);
		return;
	}

	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
			metaChunk->firstFreeChunk);
		return;
	}

	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
			metaChunk->lastFreeChunk);
		return;
	}

	// check free list for structural sanity
	uint32 freeChunks = 0;
	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
			chunk = chunk->next) {
		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
			panic("meta chunk %p has invalid element in free list, chunk: %p",
				metaChunk, chunk);
			return;
		}

		if (++freeChunks > metaChunk->chunkCount) {
			panic("meta chunk %p has cyclic free list", metaChunk);
			return;
		}
	}

	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
		panic("meta chunk %p has mismatching free/used chunk counts: total: "
			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
			metaChunk->usedChunkCount, freeChunks);
		return;
	}

	// count used chunks by looking at their reference/next field
	uint32 usedChunks = 0;
	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
			usedChunks++;
	}

	if (usedChunks != metaChunk->usedChunkCount) {
		panic("meta chunk %p has used chunks that appear free: total: "
			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
		return;
	}

	// check free range
	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
			i++) {
		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
				B_PRIu32 ", free range: %u - %u)", metaChunk,
				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
				metaChunk->lastFreeChunk);
			return;
		}
	}
}

#endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS


/*static*/ int
MemoryManager::_DumpRawAllocations(int argc, char** argv)
{
	kprintf("%-*s    meta chunk  chunk  %-*s    size (KB)\n",
		B_PRINTF_POINTER_WIDTH, "area", B_PRINTF_POINTER_WIDTH, "base");

	size_t totalSize = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;
			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;
				totalSize += size;

				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
					size / 1024);
			}
		}
	}

	kprintf("total:%*s%9" B_PRIuSIZE "\n", (2 * B_PRINTF_POINTER_WIDTH) + 21,
		"", totalSize / 1024);

	return 0;
}


/*static*/ void
MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
{
	if (printChunks)
		kprintf("chunk        base       cache  object size  cache name\n");
	else
		kprintf("chunk        base\n");
}

/*static*/ void
MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
	bool printHeader)
{
	if (printHeader)
		_PrintMetaChunkTableHeader(printChunks);

	const char* type = "empty";
	if (metaChunk->chunkSize != 0) {
		switch (metaChunk->chunkSize) {
			case SLAB_CHUNK_SIZE_SMALL:
				type = "small";
				break;
			case SLAB_CHUNK_SIZE_MEDIUM:
				type = "medium";
				break;
			case SLAB_CHUNK_SIZE_LARGE:
				type = "large";
				break;
		}
	}

	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
		(void*)metaChunk->chunkBase, type);
	if (metaChunk->chunkSize != 0) {
		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
			metaChunk->usedChunkCount, metaChunk->chunkCount,
			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
	} else
		kprintf(" --------------------------------------------\n");

	if (metaChunk->chunkSize == 0 || !printChunks)
		return;

	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		Chunk* chunk = metaChunk->chunks + i;

		// skip free chunks
		if (_IsChunkFree(metaChunk, chunk)) {
			if (!_IsChunkInFreeList(metaChunk, chunk)) {
				kprintf("%5" B_PRIu32 "  %p  appears free, but isn't in free "
					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
			}

			continue;
		}

		addr_t reference = chunk->reference;
		if ((reference & 1) == 0) {
			ObjectCache* cache = (ObjectCache*)reference;
			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), cache,
				cache != NULL ? cache->object_size : 0,
				cache != NULL ? cache->name : "");
		} else if (reference != 1) {
			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
		}
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunk(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	Area* area = _AreaForAddress(address);

	MetaChunk* metaChunk;
	if ((addr_t)address >= (addr_t)area->metaChunks
		&& (addr_t)address
			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
		metaChunk = (MetaChunk*)(addr_t)address;
	} else {
		metaChunk = area->metaChunks
			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
	}

	_DumpMetaChunk(metaChunk, true, true);

	return 0;
}


/*static*/ void
MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
	bool printChunks)
{
	kprintf("%s:\n", name);

	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
			MetaChunk* metaChunk = it.Next();) {
		_DumpMetaChunk(metaChunk, printChunks, false);
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunks(int argc, char** argv)
{
	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;

	_PrintMetaChunkTableHeader(printChunks);
	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);

	return 0;
}


/*static*/ int
MemoryManager::_DumpArea(int argc, char** argv)
{
	bool printChunks = false;

	int argi = 1;
	while (argi < argc) {
		if (argv[argi][0] != '-')
			break;
		const char* arg = argv[argi++];
		if (strcmp(arg, "-c") == 0) {
			printChunks = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	if (argi + 1 != argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[argi], &address, false))
		return 0;

	Area* area = _AreaForAddress((addr_t)address);

	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
		MetaChunk* metaChunk = area->metaChunks + k;
		_DumpMetaChunk(metaChunk, printChunks, k == 0);
	}

	return 0;
}


/*static*/ int
MemoryManager::_DumpAreas(int argc, char** argv)
{
	kprintf("  %*s    %*s   meta      small   medium  large\n",
		B_PRINTF_POINTER_WIDTH, "base", B_PRINTF_POINTER_WIDTH, "area");

	size_t totalTotalSmall = 0;
	size_t totalUsedSmall = 0;
	size_t totalTotalMedium = 0;
	size_t totalUsedMedium = 0;
	size_t totalUsedLarge = 0;
	uint32 areaCount = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		areaCount++;

		// sum up the free/used counts for the chunk sizes
		int totalSmall = 0;
		int usedSmall = 0;
		int totalMedium = 0;
		int usedMedium = 0;
		int usedLarge = 0;

		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			switch (metaChunk->chunkSize) {
				case SLAB_CHUNK_SIZE_SMALL:
					totalSmall += metaChunk->chunkCount;
					usedSmall += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_MEDIUM:
					totalMedium += metaChunk->chunkCount;
					usedMedium += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_LARGE:
					usedLarge += metaChunk->usedChunkCount;
					break;
			}
		}

		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
			area, area->vmArea, area->usedMetaChunkCount,
			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
			totalMedium, usedLarge);

		totalTotalSmall += totalSmall;
		totalUsedSmall += usedSmall;
		totalTotalMedium += totalMedium;
		totalUsedMedium += usedMedium;
		totalUsedLarge += usedLarge;
	}

	kprintf("%d free area%s:\n", sFreeAreaCount,
		sFreeAreaCount == 1 ? "" : "s");
	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
		areaCount++;
		kprintf("%p  %p\n", area, area->vmArea);
	}

	kprintf("total usage:\n");
	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
		totalTotalSmall);
	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
		totalTotalMedium);
	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIu32 " KB\n",
		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
		areaCount * SLAB_AREA_SIZE / 1024);
	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
		areaCount * kAreaAdminSize / 1024);

	return 0;
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

void
MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
	AbstractTraceEntryWithStackTrace* traceEntry)
{
	_TrackingInfoFor(allocation, size)->Init(traceEntry);
}

#endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


RANGE_MARKER_FUNCTION_END(SlabMemoryManager)
