/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <elf.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


#if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

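// Per-caller allocation statistics, collected on demand by the KDL commands
// below: one slot per distinct caller address, filled lazily.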
struct caller_info {
	addr_t		caller;
	size_t		count;
	size_t		size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);


RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)


static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

static const uint32 kSlabCodeAddressRangeCount
	= B_COUNT_OF(kSlabCodeAddressRanges) / 2;

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


RANGE_MARKER_FUNCTION_BEGIN(Slab)


#if SLAB_OBJECT_CACHE_TRACING

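// With SLAB_OBJECT_CACHE_TRACING enabled, every cache operation (create,
// delete, alloc, free, reserve) is recorded via the T() macro defined below,
// optionally including a stack trace.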
namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: "
				"%" B_PRIuSIZE ", alignment: %" B_PRIuSIZE ", max usage: "
				"%" B_PRIuSIZE ", flags: 0x%" B_PRIx32 ", cookie: %p -> cache: %p",
					fName, fObjectSize, fAlignment, fMaxByteUsage, fFlags,
					fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%" B_PRIx32
				" -> object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*		fObject;
};


class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %" B_PRIu32 ", "
				"flags: 0x%" B_PRIx32, fCache, fCount, fFlags);
		}

	private:
		uint32		fCount;
		uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING


// #pragma mark -

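// Implementations of the "slabs" and "slab_cache" KDL commands; they are
// registered in slab_init_post_area().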
static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
		"usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8" B_PRIx32
			"\n", cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
284		kprintf("usage: slab_cache [address]\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name:              %s\n", cache->name);
	kprintf("lock:              %p\n", &cache->lock);
	kprintf("object_size:       %lu\n", cache->object_size);
	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects:     %lu\n", cache->total_objects);
	kprintf("used_count:        %lu\n", cache->used_count);
	kprintf("empty_count:       %lu\n", cache->empty_count);
	kprintf("pressure:          %lu\n", cache->pressure);
	kprintf("slab_size:         %lu\n", cache->slab_size);
	kprintf("usage:             %lu\n", cache->usage);
	kprintf("maximum:           %lu\n", cache->maximum);
	kprintf("flags:             0x%" B_PRIx32 "\n", cache->flags);
	kprintf("cookie:            %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);

	kprintf("  %-*s    %-*s      size   used offset  free\n",
		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}


// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


// #pragma mark -


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool	fResetInfos;
};


class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// we need the info if we have filters set
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool		fPrintStackTrace;
	addr_t		fAddressFilter;
	team_id		fTeamFilter;
	thread_id	fThreadFilter;
};


class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t	fCaller;
};

}	// unnamed namespace

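// Returns the sCallerInfoTable entry for \a caller, adding one if necessary.
// Returns NULL once the fixed-size table is full.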
static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}


static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	// compare explicitly instead of casting the unsigned difference to int,
	// which could yield the wrong sign for large differences
	if (a->size != b->size)
		return a->size < b->size ? 1 : -1;
	return 0;
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	if (a->count != b->count)
		return a->count < b->count ? 1 : -1;
	return 0;
}


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			void* slabPages = (void*)slabAddress;
			// "-s" was given a slab structure, "-S" an arbitrary address
			if (strcmp(argv[i - 1], "-s") == 0) {
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}

			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}


static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the array
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%" B_PRId32 " different callers, sorted by %s...\n\n",
		sCallerInfoCount, sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf("     count        size      caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE "  %10" B_PRIuSIZE "  %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE

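// Records an Alloc tracing entry for \a object and, if allocation tracking
// is enabled, attaches the entry to the object's tracking info.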
void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	MutexLocker _(cache->lock);
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}


// #pragma mark -


void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -

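// Tears down \a cache. The depot is destroyed first so that its magazines
// return their objects to the slabs; any slab that still contains live
// objects afterwards is a caller bug and triggers a panic.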
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
	The caller must hold the cache's lock.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		resizeEntry->condition.Wait(&cache->lock);
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}


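// Low resource handler: rotates once through all caches (using the first
// cache as an end marker), gives each owner's reclaimer a chance to run,
// empties the depots, and returns surplus empty slabs according to the
// severity of the resource shortage.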
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}


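// The main loop of the maintainer thread: processes the resize and delete
// requests queued on sMaintenanceQueue and, when idle, performs memory
// manager maintenance.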
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			sMaintenanceCondition.Wait(locker.Get());
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// never reached
	return B_OK;
}


// #pragma mark - public API


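// A minimal usage sketch of this API (illustrative only; my_object and
// sMyObjectCache are hypothetical names, error handling omitted):
//
//	static object_cache* sMyObjectCache;
//
//	sMyObjectCache = create_object_cache("my objects", sizeof(my_object),
//		0, NULL, NULL, NULL);
//	my_object* object = (my_object*)object_cache_alloc(sMyObjectCache, 0);
//	// ... use the object ...
//	object_cache_free(sMyObjectCache, object, 0);
//	delete_object_cache(sMyObjectCache);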
object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}


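// Objects of up to 256 bytes are served by a SmallObjectCache, larger ones
// by a HashedObjectCache, which keeps its slab structures out of line and
// looks them up via a hash table.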
object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}


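// Fast path: take an object from the depot. Slow path: lock the cache and
// carve one out of a partial (or newly created empty) slab.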
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker locker(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	locker.Unlock();

	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}


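// Returns \a object to the cache's depot, or, if the cache doesn't use one,
// directly to its slab. With PARANOID_KERNEL_FREE, blocks that still carry
// the 0xdeadbeef free pattern are checked for double frees first.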
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	mutex_lock(&cache->lock);
	cache->TrackingInfoFor(object)->Clear();
	mutex_unlock(&cache->lock);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
1362		"The current allocations will by summed up by caller (their count and\n"
1363		"size) printed in decreasing order by size or, if \"-c\" is\n"
1364		"specified, by allocation count. If given <object cache> specifies\n"
1365		"the address of the object cache for which to print the allocations.\n"
1366		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
1367		"including the respective stack trace.\n"
1368		"If \"-r\" is given, the allocation infos are reset after gathering\n"
1369		"the information, so the next command invocation will only show the\n"
1370		"allocations made after the reset.\n", 0);
1371	add_debugger_command_etc("allocation_infos",
1372		&dump_allocation_infos,
1373		"Dump current slab allocations",
1374		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
1375		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
1376		"The current allocations filtered by optional values will be printed.\n"
1377		"If given, <object cache> specifies the address of the object cache\n"
1378		"or <slab> specifies the address of a slab, for which to print the\n"
1379		"allocations. Alternatively <address> specifies any address within\n"
1380		"a slab allocation range.\n"
1381		"The optional \"-a\" address filters for a specific allocation,\n"
1382		"with \"--team\" and \"--thread\" allocations by specific teams\n"
1383		"and/or threads can be filtered (these only work if a corresponding\n"
1384		"tracing entry is still available).\n"
1385		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
1386		"callers are printed, where available\n", 0);
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}


RANGE_MARKER_FUNCTION_END(Slab)


#endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE