/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as system profiler, providing an area as buffer
// for events. Those events are team, thread, and image changes (added/removed),
// periodic sampling of the return address stack for each CPU, as well as
// scheduling and I/O scheduling events.
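//
// A rough sketch of the userland side (hypothetical client code, not part of
// this file), assuming the _kern_*() syscall wrappers corresponding to the
// _user_*() functions at the end of this file:
//
//	area_id area = create_area("profiling buffer", &address, B_ANY_ADDRESS,
//		bufferSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
//	system_profiler_parameters parameters = {};
//	parameters.buffer_area = area;
//	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
//		| B_SYSTEM_PROFILER_THREAD_EVENTS;
//	_kern_system_profiler_start(&parameters);
//	while (keepRunning) {
//		uint64 dropped;
//		_kern_system_profiler_next_buffer(bytesProcessed, &dropped);
//		// consume events in [header->start, header->start + header->size)
//	}
//	_kern_system_profiler_stop();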


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject* hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[B_MAX_CPU_COUNT];
			CPUProfileData		fCPUData[B_MAX_CPU_COUNT];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold the scheduler lock and fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;

		thread_unblock_locked(fWaitingProfilerThread, B_OK);

		fWaitingProfilerThread = NULL;
		fReentered[cpu] = false;
	}
}


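/*!	Variant of _MaybeNotifyProfilerThreadLocked() that acquires the scheduler
	lock and fLock itself.
*/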
inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
	SpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
		thread_unblock_locked(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested) {
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
		scheduler_remove_listener(this);
	}

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		SpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	schedulerLocker.Unlock();

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


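/*!	Called by the profiler team to acknowledge that it has processed
	\a bytesRead bytes of the buffer and to wait for more data to arrive.
	Returns when the buffer is more than half full, when it is non-empty after
	a one second timeout, or when an error occurs.
*/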
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		schedulerLocker.Unlock();
		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}


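/*!	NotificationListener hook: dispatches team, thread, image, and I/O
	notifications to the respective private handler and wakes the profiler
	thread, if the buffer has become full enough.
*/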
void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	SpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't unblock the thread,
	// if it had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	SpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	SpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the thread is already gone again.
	// Later this cannot happen, since the thread creator notifies us before
	// actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


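/*!	Invoked when a wait object has been (re)initialized. Since used wait
	objects are only tracked lazily, a cached entry for this address is now
	stale and is recycled into the free list.
*/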
void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound weird,
	// but it makes sense, since we lazily track *used* wait objects only.
	// I.e. the object in the table is now guaranteed to be obsolete.
	if (waitObject != NULL) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}


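/*!	Invoked when a thread starts waiting on the given object. If the object is
	not cached yet, a wait object info event (name, referenced object) is
	written to the buffer and the object is entered into the cache, evicting
	the least recently used entry, if necessary. The caller must hold fLock.
*/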
void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}


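/*!	Allocates an event of \a size payload bytes plus an event header in the
	ring buffer and returns a pointer to the payload, or \c NULL (counting a
	dropped event), if there is not enough space left. If the free space wraps
	around the end of the buffer, a B_SYSTEM_PROFILER_BUFFER_END marker is
	written and the event is allocated at the buffer start. The caller must
	hold fLock.
*/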
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
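	// pad the payload size to a multiple of 4 bytes and account for the
	// event header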
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


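/*!	Called from the per-CPU profiling timer hook: captures a stack trace of
	the thread currently running on this CPU and appends it to the buffer as
	a B_SYSTEM_PROFILER_SAMPLES event.
*/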
void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


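/*!	Starts the kernel-driven system profiler: creates its own buffer area,
	records the used parameters, so that a userland team can fetch them (and
	the area) later via _user_system_profiler_recorded(), and installs the
	profiler on behalf of B_SYSTEM_TEAM.
*/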
status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}


// #pragma mark - syscalls


status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(struct system_profiler_parameters* userParameters)
{
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
}