/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

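/*!	Post-processes scheduler tracing entries into per-thread scheduling
	statistics (run times, latencies, reruns, preemptions) and per-wait-object
	wait times, on behalf of the _user_analyze_scheduling() syscall.
*/
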
#include <scheduling_analysis.h>

#include <new>

#include <elf.h>
#include <kernel.h>
#include <scheduler_defs.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/khash.h>

#include "scheduler_tracing.h"


#if SCHEDULER_TRACING

namespace SchedulingAnalysis {

using namespace SchedulerTracing;

#if SCHEDULING_ANALYSIS_TRACING
using namespace SchedulingAnalysisTracing;
#endif

struct ThreadWaitObject;

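// All analysis records (threads, wait objects, and per-thread wait objects)
// live in a single chained hash table. These two base types provide the
// polymorphic key and equality interface that makes this possible.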
struct HashObjectKey {
	virtual ~HashObjectKey()
	{
	}

	virtual uint32 HashKey() const = 0;
};


struct HashObject {
	HashObject*	next;

	virtual ~HashObject()
	{
	}

	virtual uint32 HashKey() const = 0;
	virtual bool Equals(const HashObjectKey* key) const = 0;
};


struct ThreadKey : HashObjectKey {
	thread_id	id;

	ThreadKey(thread_id id)
		:
		id(id)
	{
	}

	virtual uint32 HashKey() const
	{
		return id;
	}
};


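// Augments the exported scheduling_analysis_thread statistics with the
// transient bookkeeping needed while replaying the trace: the thread's
// current scheduling state, the time of its last state change, and the wait
// object it is currently blocked on, if any.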
struct Thread : HashObject, scheduling_analysis_thread {
	ScheduleState state;
	bigtime_t lastTime;

	ThreadWaitObject* waitObject;

	Thread(thread_id id)
		:
		state(UNKNOWN),
		lastTime(0),
		waitObject(NULL)
	{
		this->id = id;
		name[0] = '\0';

		runs = 0;
		total_run_time = 0;
		min_run_time = -1;
		max_run_time = -1;

		latencies = 0;
		total_latency = 0;
		min_latency = -1;
		max_latency = -1;

		reruns = 0;
		total_rerun_time = 0;
		min_rerun_time = -1;
		max_rerun_time = -1;

		unspecified_wait_time = 0;

		preemptions = 0;

		wait_objects = NULL;
	}

	virtual uint32 HashKey() const
	{
		return id;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const ThreadKey* key = dynamic_cast<const ThreadKey*>(_key);
		if (key == NULL)
			return false;
		return key->id == id;
	}
};


struct WaitObjectKey : HashObjectKey {
	uint32	type;
	void*	object;

	WaitObjectKey(uint32 type, void* object)
		:
		type(type),
		object(object)
	{
	}

	virtual uint32 HashKey() const
	{
		return type ^ (uint32)(addr_t)object;
	}
};


struct WaitObject : HashObject, scheduling_analysis_wait_object {
	WaitObject(uint32 type, void* object)
	{
		this->type = type;
		this->object = object;
		name[0] = '\0';
		referenced_object = NULL;
	}

	virtual uint32 HashKey() const
	{
		return type ^ (uint32)(addr_t)object;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const WaitObjectKey* key = dynamic_cast<const WaitObjectKey*>(_key);
		if (key == NULL)
			return false;
		return key->type == type && key->object == object;
	}
};


struct ThreadWaitObjectKey : HashObjectKey {
	thread_id				thread;
	uint32					type;
	void*					object;

	ThreadWaitObjectKey(thread_id thread, uint32 type, void* object)
		:
		thread(thread),
		type(type),
		object(object)
	{
	}

	virtual uint32 HashKey() const
	{
		return thread ^ type ^ (uint32)(addr_t)object;
	}
};


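// Accumulates how long and how often a particular thread has waited on a
// particular wait object. Instances are additionally chained into their
// thread's wait_objects list via next_in_list.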
struct ThreadWaitObject : HashObject, scheduling_analysis_thread_wait_object {
	ThreadWaitObject(thread_id thread, WaitObject* waitObject)
	{
		this->thread = thread;
		wait_object = waitObject;
		wait_time = 0;
		waits = 0;
		next_in_list = NULL;
	}

	virtual uint32 HashKey() const
	{
		return thread ^ wait_object->type ^ (uint32)(addr_t)wait_object->object;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const ThreadWaitObjectKey* key
			= dynamic_cast<const ThreadWaitObjectKey*>(_key);
		if (key == NULL)
			return false;
		return key->thread == thread && key->type == wait_object->type
			&& key->object == wait_object->object;
	}
};


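/*!	Satisfies all memory needs of the analysis from the single buffer passed
	to _user_analyze_scheduling(): objects are bump-allocated from the front
	of the buffer, while the hash table slots occupy its end. Nothing is ever
	freed individually; removing an object merely unlinks it from the table.
*/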
class SchedulingAnalysisManager {
public:
	SchedulingAnalysisManager(void* buffer, size_t size)
		:
		fBuffer(buffer),
		fSize(size),
		fHashTable(),
		fHashTableSize(0)
	{
		fAnalysis.thread_count = 0;
		fAnalysis.threads = NULL;
		fAnalysis.wait_object_count = 0;
		fAnalysis.thread_wait_object_count = 0;

		size_t maxObjectSize = max_c(max_c(sizeof(Thread), sizeof(WaitObject)),
			sizeof(ThreadWaitObject));
		fHashTableSize = size / (maxObjectSize + sizeof(HashObject*));
		fHashTable = (HashObject**)((uint8*)fBuffer + fSize) - fHashTableSize;
		fNextAllocation = (uint8*)fBuffer;
		fRemainingBytes = (addr_t)fHashTable - (addr_t)fBuffer;

		image_info info;
		if (elf_get_image_info_for_address((addr_t)&scheduler_init, &info)
				== B_OK) {
			fKernelStart = (addr_t)info.text;
			fKernelEnd = (addr_t)info.data + info.data_size;
		} else {
			fKernelStart = 0;
			fKernelEnd = 0;
		}
	}

	const scheduling_analysis* Analysis() const
	{
		return &fAnalysis;
	}

	void* Allocate(size_t size)
	{
		// Round up to a multiple of 8 bytes, so that consecutive allocations
		// stay suitably aligned for any of our structures.
		size = (size + 7) & ~(size_t)7;

		if (size > fRemainingBytes)
			return NULL;

		void* address = fNextAllocation;
		fNextAllocation += size;
		fRemainingBytes -= size;
		return address;
	}

	void Insert(HashObject* object)
	{
		uint32 index = object->HashKey() % fHashTableSize;
		object->next = fHashTable[index];
		fHashTable[index] = object;
	}

	void Remove(HashObject* object)
	{
		uint32 index = object->HashKey() % fHashTableSize;
		HashObject** slot = &fHashTable[index];
		while (*slot != object)
			slot = &(*slot)->next;

		*slot = object->next;
	}

	HashObject* Lookup(const HashObjectKey& key) const
	{
		uint32 index = key.HashKey() % fHashTableSize;
		HashObject* object = fHashTable[index];
		while (object != NULL && !object->Equals(&key))
			object = object->next;
		return object;
	}

	Thread* ThreadFor(thread_id id) const
	{
		return dynamic_cast<Thread*>(Lookup(ThreadKey(id)));
	}

	WaitObject* WaitObjectFor(uint32 type, void* object) const
	{
		return dynamic_cast<WaitObject*>(Lookup(WaitObjectKey(type, object)));
	}

	ThreadWaitObject* ThreadWaitObjectFor(thread_id thread, uint32 type,
		void* object) const
	{
		return dynamic_cast<ThreadWaitObject*>(
			Lookup(ThreadWaitObjectKey(thread, type, object)));
	}

	status_t AddThread(thread_id id, const char* name)
	{
		Thread* thread = ThreadFor(id);
		if (thread == NULL) {
			void* memory = Allocate(sizeof(Thread));
			if (memory == NULL)
				return B_NO_MEMORY;

			thread = new(memory) Thread(id);
			Insert(thread);
			fAnalysis.thread_count++;
		}

		if (name != NULL && thread->name[0] == '\0')
			strlcpy(thread->name, name, sizeof(thread->name));

		return B_OK;
	}

	status_t AddWaitObject(uint32 type, void* object,
		WaitObject** _waitObject = NULL)
	{
		if (WaitObjectFor(type, object) != NULL)
			return B_OK;

		void* memory = Allocate(sizeof(WaitObject));
		if (memory == NULL)
			return B_NO_MEMORY;

		WaitObject* waitObject = new(memory) WaitObject(type, object);
		Insert(waitObject);
		fAnalysis.wait_object_count++;

		// Set a dummy name for snooze() and waiting for signals, so we don't
		// try to update them later on.
		if (type == THREAD_BLOCK_TYPE_SNOOZE
			|| type == THREAD_BLOCK_TYPE_SIGNAL) {
			strcpy(waitObject->name, "?");
		}

		if (_waitObject != NULL)
			*_waitObject = waitObject;

		return B_OK;
	}

	status_t UpdateWaitObject(uint32 type, void* object, const char* name,
		void* referencedObject)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL)
			return B_OK;

		if (waitObject->name[0] != '\0') {
			// This is a new object at the same address. Replace the old one.
			Remove(waitObject);
			status_t error = AddWaitObject(type, object, &waitObject);
			if (error != B_OK)
				return error;
		}

		if (name == NULL)
			name = "?";

		strlcpy(waitObject->name, name, sizeof(waitObject->name));
		waitObject->referenced_object = referencedObject;

		return B_OK;
	}

	bool UpdateWaitObjectDontAdd(uint32 type, void* object, const char* name,
		void* referencedObject)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL || waitObject->name[0] != '\0')
			return false;

		if (name == NULL)
			name = "?";

		strlcpy(waitObject->name, name, sizeof(waitObject->name));
		waitObject->referenced_object = referencedObject;

		return true;
	}

	status_t AddThreadWaitObject(Thread* thread, uint32 type, void* object)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL) {
			// The algorithm should prevent this case.
			return B_ERROR;
		}

		ThreadWaitObject* threadWaitObject = ThreadWaitObjectFor(thread->id,
			type, object);
		if (threadWaitObject == NULL
			|| threadWaitObject->wait_object != waitObject) {
			if (threadWaitObject != NULL)
				Remove(threadWaitObject);

			void* memory = Allocate(sizeof(ThreadWaitObject));
			if (memory == NULL)
				return B_NO_MEMORY;

			threadWaitObject = new(memory) ThreadWaitObject(thread->id,
				waitObject);
			Insert(threadWaitObject);
			fAnalysis.thread_wait_object_count++;

			threadWaitObject->next_in_list = thread->wait_objects;
			thread->wait_objects = threadWaitObject;
		}

		thread->waitObject = threadWaitObject;

		return B_OK;
	}

	int32 MissingWaitObjects() const
	{
		// Iterate through the hash table and count the wait objects that don't
		// have a name yet.
		int32 count = 0;
		for (uint32 i = 0; i < fHashTableSize; i++) {
			HashObject* object = fHashTable[i];
			while (object != NULL) {
				WaitObject* waitObject = dynamic_cast<WaitObject*>(object);
				if (waitObject != NULL && waitObject->name[0] == '\0')
					count++;

				object = object->next;
			}
		}

		return count;
	}

	status_t FinishAnalysis()
	{
		// allocate the thread array
		scheduling_analysis_thread** threads
			= (scheduling_analysis_thread**)Allocate(
				sizeof(Thread*) * fAnalysis.thread_count);
		if (threads == NULL)
			return B_NO_MEMORY;

		// Iterate through the hash table and collect all threads. Also polish
		// all wait objects that haven't been updated yet.
		int32 index = 0;
		for (uint32 i = 0; i < fHashTableSize; i++) {
			HashObject* object = fHashTable[i];
			while (object != NULL) {
				Thread* thread = dynamic_cast<Thread*>(object);
				if (thread != NULL) {
					threads[index++] = thread;
				} else if (WaitObject* waitObject
						= dynamic_cast<WaitObject*>(object)) {
					_PolishWaitObject(waitObject);
				}

				object = object->next;
			}
		}

		fAnalysis.threads = threads;
		dprintf("scheduling analysis: free bytes: %lu/%lu\n", fRemainingBytes,
			fSize);
		return B_OK;
	}

private:
	void _PolishWaitObject(WaitObject* waitObject)
	{
		if (waitObject->name[0] != '\0')
			return;

		switch (waitObject->type) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_info info;
				if (get_sem_info((sem_id)(addr_t)waitObject->object, &info)
						== B_OK) {
					strlcpy(waitObject->name, info.name,
						sizeof(waitObject->name));
				}
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			{
				// If the condition variable object is in the kernel image,
				// assume it is still initialized.
				ConditionVariable* variable
					= (ConditionVariable*)waitObject->object;
				if (!_IsInKernelImage(variable))
					break;

				waitObject->referenced_object = (void*)variable->Object();
				strlcpy(waitObject->name, variable->ObjectType(),
					sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_MUTEX:
			{
				// If the mutex object is in the kernel image, assume it is
				// still initialized.
				mutex* lock = (mutex*)waitObject->object;
				if (!_IsInKernelImage(lock))
					break;

				strlcpy(waitObject->name, lock->name, sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_RW_LOCK:
			{
				// If the rw_lock object is in the kernel image, assume it is
				// still initialized.
				rw_lock* lock = (rw_lock*)waitObject->object;
				if (!_IsInKernelImage(lock))
					break;

				strlcpy(waitObject->name, lock->name, sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_OTHER:
			{
				const char* name = (const char*)waitObject->object;
				if (name == NULL || _IsInKernelImage(name))
					return;

				strlcpy(waitObject->name, name, sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_SNOOZE:
			case THREAD_BLOCK_TYPE_SIGNAL:
			default:
				break;
		}

		if (waitObject->name[0] != '\0')
			return;

		strcpy(waitObject->name, "?");
	}

	bool _IsInKernelImage(const void* _address)
	{
		addr_t address = (addr_t)_address;
		return address >= fKernelStart && address < fKernelEnd;
	}

private:
	scheduling_analysis	fAnalysis;
	void*				fBuffer;
	size_t				fSize;
	HashObject**		fHashTable;
	uint32				fHashTableSize;
	uint8*				fNextAllocation;
	size_t				fRemainingBytes;
	addr_t				fKernelStart;
	addr_t				fKernelEnd;
};


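/*!	Builds the analysis in two passes over the tracing buffer:
	1. Walk the entries backwards through the requested time range and
	   register every thread and wait object encountered, so that all
	   bookkeeping structures exist before any statistics are computed.
	2. Walk the same range forwards and feed each scheduling event into a
	   per-thread state machine that accumulates run times, latencies,
	   rerun times, and wait times.
	With SCHEDULING_ANALYSIS_TRACING enabled, wait object info entries are
	used along the way to name the wait objects.
*/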
static status_t
analyze_scheduling(bigtime_t from, bigtime_t until,
	SchedulingAnalysisManager& manager)
{
	// Determine how many threads and locking primitives we are dealing with.
	TraceEntryIterator iterator;
	iterator.MoveTo(INT_MAX);
	while (TraceEntry* _entry = iterator.Previous()) {
		SchedulerTraceEntry* baseEntry
			= dynamic_cast<SchedulerTraceEntry*>(_entry);
		if (baseEntry == NULL || baseEntry->Time() >= until)
			continue;
		if (baseEntry->Time() < from)
			break;

		status_t error = manager.AddThread(baseEntry->ThreadID(),
			baseEntry->Name());
		if (error != B_OK)
			return error;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			error = manager.AddThread(entry->PreviousThreadID(), NULL);
			if (error != B_OK)
				return error;

			if (entry->PreviousState() == B_THREAD_WAITING) {
				void* waitObject = (void*)entry->PreviousWaitObject();
				switch (entry->PreviousWaitObjectType()) {
					case THREAD_BLOCK_TYPE_SNOOZE:
					case THREAD_BLOCK_TYPE_SIGNAL:
						// snooze() and signal waits have no wait object
						waitObject = NULL;
						break;
					case THREAD_BLOCK_TYPE_SEMAPHORE:
					case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
					case THREAD_BLOCK_TYPE_MUTEX:
					case THREAD_BLOCK_TYPE_RW_LOCK:
					case THREAD_BLOCK_TYPE_OTHER:
					default:
						break;
				}

				error = manager.AddWaitObject(entry->PreviousWaitObjectType(),
					waitObject);
				if (error != B_OK)
					return error;
			}
		}
	}

#if SCHEDULING_ANALYSIS_TRACING
	int32 startEntryIndex = iterator.Index();
#endif

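	// Second pass: process the entries in chronological order. Each thread
	// acts as a small state machine (see ScheduleState); the time between
	// two consecutive events of a thread is attributed to run time, latency,
	// rerun time, or wait time, depending on the states involved.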
	while (TraceEntry* _entry = iterator.Next()) {
#if SCHEDULING_ANALYSIS_TRACING
		// might be info on a wait object
		if (WaitObjectTraceEntry* waitObjectEntry
				= dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
			status_t error = manager.UpdateWaitObject(waitObjectEntry->Type(),
				waitObjectEntry->Object(), waitObjectEntry->Name(),
				waitObjectEntry->ReferencedObject());
			if (error != B_OK)
				return error;
			continue;
		}
#endif

		SchedulerTraceEntry* baseEntry
			= dynamic_cast<SchedulerTraceEntry*>(_entry);
		if (baseEntry == NULL)
			continue;
		if (baseEntry->Time() >= until)
			break;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			// scheduled thread
			Thread* thread = manager.ThreadFor(entry->ThreadID());

			bigtime_t diffTime = entry->Time() - thread->lastTime;

			if (thread->state == READY) {
				// thread scheduled after having been woken up
				thread->latencies++;
				thread->total_latency += diffTime;
				if (thread->min_latency < 0 || diffTime < thread->min_latency)
					thread->min_latency = diffTime;
				if (diffTime > thread->max_latency)
					thread->max_latency = diffTime;
			} else if (thread->state == PREEMPTED) {
				// thread scheduled again after having been preempted
				thread->reruns++;
				thread->total_rerun_time += diffTime;
				if (thread->min_rerun_time < 0
						|| diffTime < thread->min_rerun_time) {
					thread->min_rerun_time = diffTime;
				}
				if (diffTime > thread->max_rerun_time)
					thread->max_rerun_time = diffTime;
			}

			if (thread->state == STILL_RUNNING) {
				// Thread was running and continues to run.
				thread->state = RUNNING;
			}

			if (thread->state != RUNNING) {
				thread->lastTime = entry->Time();
				thread->state = RUNNING;
			}

			// unscheduled thread

			if (entry->ThreadID() == entry->PreviousThreadID())
				continue;

			thread = manager.ThreadFor(entry->PreviousThreadID());

			diffTime = entry->Time() - thread->lastTime;

			if (thread->state == STILL_RUNNING) {
				// thread preempted
				thread->runs++;
				thread->preemptions++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
					thread->min_run_time = diffTime;
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;

				thread->lastTime = entry->Time();
				thread->state = PREEMPTED;
			} else if (thread->state == RUNNING) {
				// thread starts waiting (it hadn't been added to the run
				// queue before being unscheduled)
				thread->runs++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
					thread->min_run_time = diffTime;
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;

				if (entry->PreviousState() == B_THREAD_WAITING) {
					void* waitObject = (void*)entry->PreviousWaitObject();
					switch (entry->PreviousWaitObjectType()) {
						case THREAD_BLOCK_TYPE_SNOOZE:
						case THREAD_BLOCK_TYPE_SIGNAL:
							waitObject = NULL;
							break;
						case THREAD_BLOCK_TYPE_SEMAPHORE:
						case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
						case THREAD_BLOCK_TYPE_MUTEX:
						case THREAD_BLOCK_TYPE_RW_LOCK:
						case THREAD_BLOCK_TYPE_OTHER:
						default:
							break;
					}

					status_t error = manager.AddThreadWaitObject(thread,
						entry->PreviousWaitObjectType(), waitObject);
					if (error != B_OK)
						return error;
				}

				thread->lastTime = entry->Time();
				thread->state = WAITING;
			} else if (thread->state == UNKNOWN) {
				uint32 threadState = entry->PreviousState();
				if (threadState == B_THREAD_WAITING
					|| threadState == B_THREAD_SUSPENDED) {
					thread->lastTime = entry->Time();
					thread->state = WAITING;
				} else if (threadState == B_THREAD_READY) {
					thread->lastTime = entry->Time();
					thread->state = PREEMPTED;
				}
			}
		} else if (EnqueueThread* entry
				= dynamic_cast<EnqueueThread*>(_entry)) {
			// thread enqueued in run queue

			Thread* thread = manager.ThreadFor(entry->ThreadID());

			if (thread->state == RUNNING || thread->state == STILL_RUNNING) {
				// Thread was running and is re-entered into the run queue.
				// The scheduler does this when the thread remains ready.
				thread->state = STILL_RUNNING;
			} else {
				// Thread was waiting and is ready now.
				bigtime_t diffTime = entry->Time() - thread->lastTime;
				if (thread->waitObject != NULL) {
					thread->waitObject->wait_time += diffTime;
					thread->waitObject->waits++;
					thread->waitObject = NULL;
				} else if (thread->state != UNKNOWN)
					thread->unspecified_wait_time += diffTime;

				thread->lastTime = entry->Time();
				thread->state = READY;
			}
		} else if (RemoveThread* entry = dynamic_cast<RemoveThread*>(_entry)) {
			// thread removed from run queue

			Thread* thread = manager.ThreadFor(entry->ThreadID());

			// This really only happens when the thread priority is changed
			// while the thread is ready.

			bigtime_t diffTime = entry->Time() - thread->lastTime;
			if (thread->state == RUNNING) {
				// This should never happen.
				thread->runs++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
					thread->min_run_time = diffTime;
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;
			} else if (thread->state == READY || thread->state == PREEMPTED) {
				// Not really correct, but the case is rare and we keep it
				// simple.
				thread->unspecified_wait_time += diffTime;
			}

			thread->lastTime = entry->Time();
			thread->state = WAITING;
		}
	}

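	// Some wait objects may have been created before the analyzed time range,
	// so their info entries precede the entries processed above. Scan
	// backwards from where the second pass started until all wait objects
	// have been named or the buffer is exhausted.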
#if SCHEDULING_ANALYSIS_TRACING
	int32 missingWaitObjects = manager.MissingWaitObjects();
	if (missingWaitObjects > 0) {
		iterator.MoveTo(startEntryIndex + 1);
		while (TraceEntry* _entry = iterator.Previous()) {
			if (WaitObjectTraceEntry* waitObjectEntry
					= dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
				if (manager.UpdateWaitObjectDontAdd(
						waitObjectEntry->Type(), waitObjectEntry->Object(),
						waitObjectEntry->Name(),
						waitObjectEntry->ReferencedObject())) {
					if (--missingWaitObjects == 0)
						break;
				}
			}
		}
	}
#endif

	return B_OK;
}

}	// namespace SchedulingAnalysis

#endif	// SCHEDULER_TRACING


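/*!	Syscall backend: analyzes the scheduler tracing entries in the given time
	range and writes the results to the supplied userland buffer.

	A minimal usage sketch from userland, assuming SCHEDULER_TRACING was
	enabled at build time (the _kern_analyze_scheduling() stub name and the
	buffer size are assumptions for illustration, not defined in this file):

		const size_t kBufferSize = 10 * 1024 * 1024;
		void* buffer = malloc(kBufferSize);
		scheduling_analysis analysis;
		status_t status = _kern_analyze_scheduling(startTime, system_time(),
			buffer, kBufferSize, &analysis);

	On success, analysis.threads points into the buffer, so the buffer must
	remain valid for as long as the results are used.
*/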
status_t
_user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
	size_t size, scheduling_analysis* analysis)
{
#if SCHEDULER_TRACING
	using namespace SchedulingAnalysis;

	// Align the buffer address to 8 bytes and round the size down to a
	// multiple of 8, so that all allocations from the buffer stay aligned.
	if ((addr_t)buffer & 0x7) {
		addr_t diff = (addr_t)buffer & 0x7;
		buffer = (void*)((addr_t)buffer + 8 - diff);
		size -= 8 - diff;
	}
	size &= ~(size_t)0x7;

	if (buffer == NULL || !IS_USER_ADDRESS(buffer) || size == 0)
		return B_BAD_VALUE;

	status_t error = lock_memory(buffer, size, B_READ_DEVICE);
	if (error != B_OK)
		return error;

	SchedulingAnalysisManager manager(buffer, size);

	InterruptsLocker locker;
	lock_tracing_buffer();

	error = analyze_scheduling(from, until, manager);

	unlock_tracing_buffer();
	locker.Unlock();

	if (error == B_OK)
		error = manager.FinishAnalysis();

	unlock_memory(buffer, size, B_READ_DEVICE);

	if (error == B_OK) {
		error = user_memcpy(analysis, manager.Analysis(),
			sizeof(scheduling_analysis));
	}

	return error;
#else
	return B_BAD_VALUE;
#endif
}