/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <elf.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/khash.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


#if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

struct caller_info {
	addr_t		caller;
	size_t		count;
	size_t		size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);


RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)


static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

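// Each RANGE_MARKER_FUNCTION_ADDRESS_RANGE() entry above contributes a
// begin/end address pair to the array, hence the division by 2 below.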
static const uint32 kSlabCodeAddressRangeCount
	= sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


RANGE_MARKER_FUNCTION_BEGIN(Slab)


#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: %lu, "
				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
					fFlags, fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
				"object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*		fObject;
};


class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %lu, "
				"flags: 0x%lx", fCache, fCount, fFlags);
		}

	private:
		size_t		fCount;
		uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING
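
// T(Entry(...)) allocates a tracing entry when SLAB_OBJECT_CACHE_TRACING is
// enabled and expands to nothing otherwise, so call sites can trace
// unconditionally without #if guards.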


// #pragma mark -


static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
		"usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8" B_PRIx32
			"\n", cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache <address>\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name:              %s\n", cache->name);
	kprintf("lock:              %p\n", &cache->lock);
	kprintf("object_size:       %lu\n", cache->object_size);
	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects:     %lu\n", cache->total_objects);
	kprintf("used_count:        %lu\n", cache->used_count);
	kprintf("empty_count:       %lu\n", cache->empty_count);
	kprintf("pressure:          %lu\n", cache->pressure);
	kprintf("slab_size:         %lu\n", cache->slab_size);
	kprintf("usage:             %lu\n", cache->usage);
	kprintf("maximum:           %lu\n", cache->maximum);
	kprintf("flags:             0x%" B_PRIx32 "\n", cache->flags);
	kprintf("cookie:            %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);

	kprintf("  %-*s    %-*s      size   used offset  free\n",
		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}


// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


// #pragma mark -


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool	fResetInfos;
};


class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// we need the info if we have filters set
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool		fPrintStackTrace;
	addr_t		fAddressFilter;
	team_id		fTeamFilter;
	thread_id	fThreadFilter;
};


class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t	fCaller;
};

}	// unnamed namespace

static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}

static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	// compare explicitly instead of casting the size_t difference to int,
	// which would wrap for differences larger than INT_MAX
	if (a->size != b->size)
		return a->size < b->size ? 1 : -1;
	return 0;
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	if (a->count != b->count)
		return a->count < b->count ? 1 : -1;
	return 0;
}


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			void* slabPages = (void*)(addr_t)slabAddress;
			if (strcmp(argv[i - 1], "-s") == 0) {
				// lowercase "-s": the argument is the slab structure itself;
				// "-S" passes an address within the slab's page range
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}

			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}


static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the array
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
		sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf("     count        size      caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE "  %10" B_PRIuSIZE "  %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	MutexLocker _(cache->lock);
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}


// #pragma mark -


void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -


static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}


static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}


static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// never reached
	return B_OK;
}


// #pragma mark - public API


object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}
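
// Illustrative usage sketch (my_object and sMyCache are hypothetical):
//
//	sMyCache = create_object_cache("my objects", sizeof(my_object), 0,
//		NULL, NULL, NULL);
//	my_object* object = (my_object*)object_cache_alloc(sMyCache, 0);
//	...
//	object_cache_free(sMyCache, object, 0);
//	delete_object_cache(sMyCache);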


object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker locker(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	locker.Unlock();

	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	mutex_lock(&cache->lock);
	cache->TrackingInfoFor(object)->Clear();
	mutex_unlock(&cache->lock);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}
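
// Illustrative sketch: pre-reserving objects makes a later allocation from a
// context that cannot wait for memory much more likely to succeed
// (sRequestCache and request are hypothetical):
//
//	object_cache_reserve(sRequestCache, 32, 0);
//	...
//	request* r = (request*)object_cache_alloc(sRequestCache,
//		CACHE_DONT_WAIT_FOR_MEMORY);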


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
		"The current allocations will be summed up by caller (their count\n"
		"and size) and printed in decreasing order by size or, if \"-c\" is\n"
		"specified, by allocation count. If given, <object cache> specifies\n"
		"the address of the object cache for which to print the allocations.\n"
		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
		"including the respective stack trace.\n"
		"If \"-r\" is given, the allocation infos are reset after gathering\n"
		"the information, so the next command invocation will only show the\n"
		"allocations made after the reset.\n", 0);
	add_debugger_command_etc("allocation_infos",
		&dump_allocation_infos,
		"Dump current slab allocations",
		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
		"The current allocations filtered by optional values will be printed.\n"
		"If given, <object cache> specifies the address of the object cache\n"
		"or <slab> specifies the address of a slab, for which to print the\n"
		"allocations. Alternatively <address> specifies any address within\n"
		"a slab allocation range.\n"
		"The optional \"-a\" address filters for a specific allocation.\n"
		"With \"--team\" and \"--thread\", allocations can be filtered by\n"
		"specific teams and/or threads (these only work if a corresponding\n"
		"tracing entry is still available).\n"
		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
		"callers are printed, where available.\n", 0);
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}


RANGE_MARKER_FUNCTION_END(Slab)


#endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE