/*
 * Copyright 2013-2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2009, Rene Gollent, rene@gollent.com.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! The thread scheduler */


#include <OS.h>

#include <AutoDeleter.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <kscheduler.h>
#include <listeners.h>
#include <load_tracking.h>
#include <scheduler_defs.h>
#include <smp.h>
#include <timer.h>
#include <util/Random.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
#include "scheduler_tracing.h"


namespace Scheduler {


class ThreadEnqueuer : public ThreadProcessing {
public:
	void		operator()(ThreadData* thread);
};

scheduler_mode gCurrentModeID;
scheduler_mode_operations* gCurrentMode;

bool gSingleCore;
bool gTrackCoreLoad;
bool gTrackCPULoad;

}	// namespace Scheduler

using namespace Scheduler;


static bool sSchedulerEnabled;

SchedulerListenerList gSchedulerListeners;
spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;

static scheduler_mode_operations* sSchedulerModes[] = {
	&gSchedulerLowLatencyMode,
	&gSchedulerPowerSavingMode,
};

// Since the CPU IDs used internally by the kernel bear no relation to the
// actual CPU topology, the following arrays are used to efficiently look up
// the core and the package that the CPU in question belongs to.
static int32* sCPUToCore;
static int32* sCPUToPackage;
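// For example, init() below uses these mappings as gCoreEntries[sCPUToCore[i]]
// and gPackageEntries[sCPUToPackage[i]] to reach the scheduler entries of a
// logical CPU in constant time.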


static void enqueue(Thread* thread, bool newOne);


void
ThreadEnqueuer::operator()(ThreadData* thread)
{
	enqueue(thread->GetThread(), false);
}


void
scheduler_dump_thread_data(Thread* thread)
{
	thread->scheduler_data->Dump();
}


static void
enqueue(Thread* thread, bool newOne)
{
	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;

	int32 threadPriority = threadData->GetEffectivePriority();
	T(EnqueueThread(thread, threadPriority));

	CPUEntry* targetCPU = NULL;
	CoreEntry* targetCore = NULL;
	if (thread->pinned_to_cpu > 0) {
		ASSERT(thread->previous_cpu != NULL);
		ASSERT(threadData->Core() != NULL);
		targetCPU = &gCPUEntries[thread->previous_cpu->cpu_num];
	} else if (gSingleCore) {
		targetCore = &gCoreEntries[0];
	} else if (threadData->Core() != NULL
		&& (!newOne || !threadData->HasCacheExpired())) {
		targetCore = threadData->Rebalance();
	}

	const bool rescheduleNeeded = threadData->ChooseCoreAndCPU(targetCore, targetCPU);

	TRACE("enqueueing thread %ld with priority %ld on CPU %ld (core %ld)\n",
		thread->id, threadPriority, targetCPU->ID(), targetCore->ID());

	bool wasRunQueueEmpty = false;
	threadData->Enqueue(wasRunQueueEmpty);

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
		thread);

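	// Ask the target CPU to reschedule only if the newly enqueued thread
	// should preempt what is currently running there (or its run queue was
	// empty); otherwise the thread simply waits for its turn.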
	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
	if (threadPriority > heapPriority
		|| (threadPriority == heapPriority && rescheduleNeeded)
		|| wasRunQueueEmpty) {

		if (targetCPU->ID() == smp_get_current_cpu()) {
			gCPU[targetCPU->ID()].invoke_scheduler = true;
		} else {
			smp_send_ici(targetCPU->ID(), SMP_MSG_RESCHEDULE, 0, 0, 0,
				NULL, SMP_MSG_FLAG_ASYNC);
		}
	}
}


/*!	Enqueues the thread into the run queue.
	Note: thread lock must be held when entering this function
*/
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	SchedulerModeLocker _;

	TRACE("enqueueing new thread %ld with static priority %ld\n", thread->id,
		thread->priority);

	ThreadData* threadData = thread->scheduler_data;

	if (threadData->ShouldCancelPenalty())
		threadData->CancelPenalty();

	enqueue(thread, true);
}


/*!	Sets the priority of a thread.
*/
int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
	ASSERT(are_interrupts_enabled());

	InterruptsSpinLocker _(thread->scheduler_lock);
	SchedulerModeLocker modeLocker;

	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;
	int32 oldPriority = thread->priority;

	TRACE("changing thread %ld priority to %ld (old: %ld, effective: %ld)\n",
		thread->id, priority, oldPriority, threadData->GetEffectivePriority());

	thread->priority = priority;
	threadData->CancelPenalty();

	if (priority == oldPriority)
		return oldPriority;

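	// A thread that is not ready is not sitting in any run queue; for a
	// currently running thread only its CPU's entry in the priority heap
	// needs to be updated.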
	if (thread->state != B_THREAD_READY) {
		if (thread->state == B_THREAD_RUNNING) {
			ASSERT(threadData->Core() != NULL);

			ASSERT(thread->cpu != NULL);
			CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];

			CoreCPUHeapLocker _(threadData->Core());
			cpu->UpdatePriority(priority);
		}

		return oldPriority;
	}

	// The thread is in the run queue. We need to remove it and re-insert it at
	// a new position.

	T(RemoveThread(thread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
		thread);

	if (threadData->Dequeue())
		enqueue(thread, true);

	return oldPriority;
}


void
scheduler_reschedule_ici()
{
	// This function is called as a result of an incoming ICI.
	// Make sure the reschedule() is invoked.
	get_cpu_struct()->invoke_scheduler = true;
}


static inline void
stop_cpu_timers(Thread* fromThread, Thread* toThread)
{
	SpinLocker teamLocker(&fromThread->team->time_lock);
	SpinLocker threadLocker(&fromThread->time_lock);

	if (fromThread->HasActiveCPUTimeUserTimers()
		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_stop_cpu_timers(fromThread, toThread);
	}
}


static inline void
continue_cpu_timers(Thread* thread, cpu_ent* cpu)
{
	SpinLocker teamLocker(&thread->team->time_lock);
	SpinLocker threadLocker(&thread->time_lock);

	if (thread->HasActiveCPUTimeUserTimers()
		|| thread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_continue_cpu_timers(thread, cpu->previous_thread);
	}
}


static void
thread_resumes(Thread* thread)
{
	cpu_ent* cpu = thread->cpu;

	release_spinlock(&cpu->previous_thread->scheduler_lock);

	// continue CPU time based user timers
	continue_cpu_timers(thread, cpu);

	// notify the user debugger code
	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_scheduled(thread);
}


void
scheduler_new_thread_entry(Thread* thread)
{
	thread_resumes(thread);

	SpinLocker locker(thread->time_lock);
	thread->last_time = system_time();
}


/*!	Switches the currently running thread.
	This is a service function for scheduler implementations.

	\param fromThread The currently running thread.
	\param toThread The thread to switch to. Must be different from
		\a fromThread.
*/
static inline void
switch_thread(Thread* fromThread, Thread* toThread)
{
	// notify the user debugger code
	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_unscheduled(fromThread);

	// stop CPU time based user timers
	stop_cpu_timers(fromThread, toThread);

	// update CPU and Thread structures and perform the context switch
	cpu_ent* cpu = fromThread->cpu;
	toThread->previous_cpu = toThread->cpu = cpu;
	fromThread->cpu = NULL;
	cpu->running_thread = toThread;
	cpu->previous_thread = fromThread;

	arch_thread_set_current_thread(toThread);
	arch_thread_context_switch(fromThread, toThread);

	// The use of fromThread below looks weird, but is correct. fromThread had
	// been unscheduled earlier, but is back now. For a thread scheduled the
	// first time the same is done in thread.cpp:common_thread_entry().
	thread_resumes(fromThread);
}


static void
reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	int32 thisCPU = smp_get_current_cpu();
	gCPU[thisCPU].invoke_scheduler = false;

	CPUEntry* cpu = CPUEntry::GetCPU(thisCPU);
	CoreEntry* core = CoreEntry::GetCore(thisCPU);

	Thread* oldThread = thread_get_current_thread();
	ThreadData* oldThreadData = oldThread->scheduler_data;

	oldThreadData->StopCPUTime();

	SchedulerModeLocker modeLocker;

	TRACE("reschedule(): cpu %ld, current thread = %ld\n", thisCPU,
		oldThread->id);

	oldThread->state = nextState;

	// return time spent in interrupts
	oldThreadData->SetStolenInterruptTime(gCPU[thisCPU].interrupt_time);

	bool enqueueOldThread = false;
	bool putOldThreadAtBack = false;
	switch (nextState) {
		case B_THREAD_RUNNING:
		case B_THREAD_READY:
			enqueueOldThread = true;

			if (!oldThreadData->IsIdle()) {
				oldThreadData->Continues();
				if (oldThreadData->HasQuantumEnded(oldThread->cpu->preempted,
						oldThread->has_yielded)) {
					TRACE("enqueueing thread %ld into run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = true;
				} else {
					TRACE("putting thread %ld back in run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = false;
				}
			}

			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			oldThreadData->Dies();
			break;
		default:
			oldThreadData->GoesAway();
			TRACE("not enqueueing thread %ld into run queue next_state = %ld\n",
				oldThread->id, nextState);
			break;
	}

	oldThread->has_yielded = false;

	// select thread with the biggest priority and enqueue back the old thread
	ThreadData* nextThreadData;
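	// A disabled CPU may only keep running its idle thread; a non-pinned old
	// thread is unassigned from the core so it can be picked up elsewhere.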
	if (gCPU[thisCPU].disabled) {
		if (!oldThreadData->IsIdle()) {
			if (oldThread->pinned_to_cpu == 0) {
				putOldThreadAtBack = true;
				oldThreadData->UnassignCore(true);
			} else {
				putOldThreadAtBack = false;
			}

			CPURunQueueLocker cpuLocker(cpu);
			nextThreadData = cpu->PeekIdleThread();
			cpu->Remove(nextThreadData);
		} else
			nextThreadData = oldThreadData;
	} else {
		nextThreadData
			= cpu->ChooseNextThread(enqueueOldThread ? oldThreadData : NULL,
				putOldThreadAtBack);

		// update CPU heap
		CoreCPUHeapLocker cpuLocker(core);
		cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
	}

	Thread* nextThread = nextThreadData->GetThread();
	ASSERT(!gCPU[thisCPU].disabled || nextThreadData->IsIdle());

	if (nextThread != oldThread) {
		if (enqueueOldThread) {
			if (putOldThreadAtBack)
				enqueue(oldThread, false);
			else
				oldThreadData->PutBack();
		}

		acquire_spinlock(&nextThread->scheduler_lock);
	}

	TRACE("reschedule(): cpu %ld, next thread = %ld\n", thisCPU,
		nextThread->id);

	T(ScheduleThread(nextThread, oldThread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadScheduled,
		oldThread, nextThread);

	ASSERT(nextThreadData->Core() == core);
	nextThread->state = B_THREAD_RUNNING;
	nextThreadData->StartCPUTime();

	// track CPU activity
	cpu->TrackActivity(oldThreadData, nextThreadData);

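	// Start a fresh quantum (and, if another thread was chosen, perform the
	// actual context switch); if the very same thread keeps running and was
	// not preempted, its current quantum simply continues.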
	if (nextThread != oldThread || oldThread->cpu->preempted) {
		cpu->StartQuantumTimer(nextThreadData, oldThread->cpu->preempted);

		oldThread->cpu->preempted = false;
		if (!nextThreadData->IsIdle())
			nextThreadData->Continues();
		else
			gCurrentMode->rebalance_irqs(true);
		nextThreadData->StartQuantum();

		modeLocker.Unlock();

		SCHEDULER_EXIT_FUNCTION();

		if (nextThread != oldThread)
			switch_thread(oldThread, nextThread);
	}
}


/*!	Runs the scheduler.
	Note: expects thread spinlock to be held
*/
void
scheduler_reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	if (!sSchedulerEnabled) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && nextState != B_THREAD_READY)
			panic("scheduler_reschedule_no_op() called in non-ready thread");
		return;
	}

	reschedule(nextState);
}


status_t
scheduler_on_thread_create(Thread* thread, bool idleThread)
{
	thread->scheduler_data = new(std::nothrow) ThreadData(thread);
	if (thread->scheduler_data == NULL)
		return B_NO_MEMORY;
	return B_OK;
}


void
scheduler_on_thread_init(Thread* thread)
{
	ASSERT(thread->scheduler_data != NULL);

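	// This relies on the idle threads being initialized exactly once per CPU
	// and in CPU order, so the running counter below doubles as the ID of the
	// CPU that the idle thread gets pinned to.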
	if (thread_is_idle_thread(thread)) {
		static int32 sIdleThreadsID;
		int32 cpuID = atomic_add(&sIdleThreadsID, 1);

		thread->previous_cpu = &gCPU[cpuID];
		thread->pinned_to_cpu = 1;

		thread->scheduler_data->Init(CoreEntry::GetCore(cpuID));
	} else
		thread->scheduler_data->Init();
}


void
scheduler_on_thread_destroy(Thread* thread)
{
	delete thread->scheduler_data;
}


/*!	This starts the scheduler. Must be run in the context of the initial idle
	thread. Interrupts must be disabled and will be disabled when returning.
*/
void
scheduler_start()
{
	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
	SCHEDULER_ENTER_FUNCTION();

	reschedule(B_THREAD_READY);
}


status_t
scheduler_set_operation_mode(scheduler_mode mode)
{
	if (mode != SCHEDULER_MODE_LOW_LATENCY
		&& mode != SCHEDULER_MODE_POWER_SAVING) {
		return B_BAD_VALUE;
	}

	dprintf("scheduler: switching to %s mode\n", sSchedulerModes[mode]->name);

	InterruptsBigSchedulerLocker _;

	gCurrentModeID = mode;
	gCurrentMode = sSchedulerModes[mode];
	gCurrentMode->switch_to_mode();

	ThreadData::ComputeQuantumLengths();

	return B_OK;
}


void
scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("scheduler_set_cpu_enabled: called with interrupts enabled");
#endif

	dprintf("scheduler: %s CPU %" B_PRId32 "\n",
		enabled ? "enabling" : "disabling", cpuID);

	InterruptsBigSchedulerLocker _;

	gCurrentMode->set_cpu_enabled(cpuID, enabled);

	CPUEntry* cpu = &gCPUEntries[cpuID];
	CoreEntry* core = cpu->Core();

	ASSERT(core->CPUCount() >= 0);
	if (enabled)
		cpu->Start();
	else {
		cpu->UpdatePriority(B_IDLE_PRIORITY);

		ThreadEnqueuer enqueuer;
		core->RemoveCPU(cpu, enqueuer);
	}

	gCPU[cpuID].disabled = !enabled;

	if (!enabled) {
		cpu->Stop();

		// don't wait until the thread quantum ends
		if (smp_get_current_cpu() != cpuID) {
			smp_send_ici(cpuID, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
				SMP_MSG_FLAG_ASYNC);
		}
	}
}


static void
traverse_topology_tree(const cpu_topology_node* node, int packageID, int coreID)
{
	switch (node->level) {
		case CPU_TOPOLOGY_SMT:
			sCPUToCore[node->id] = coreID;
			sCPUToPackage[node->id] = packageID;
			return;

		case CPU_TOPOLOGY_CORE:
			coreID = node->id;
			break;

		case CPU_TOPOLOGY_PACKAGE:
			packageID = node->id;
			break;

		default:
			break;
	}

	for (int32 i = 0; i < node->children_count; i++)
		traverse_topology_tree(node->children[i], packageID, coreID);
}


static status_t
build_topology_mappings(int32& cpuCount, int32& coreCount, int32& packageCount)
{
	cpuCount = smp_get_num_cpus();

	sCPUToCore = new(std::nothrow) int32[cpuCount];
	if (sCPUToCore == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToCoreDeleter(sCPUToCore);

	sCPUToPackage = new(std::nothrow) int32[cpuCount];
	if (sCPUToPackage == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToPackageDeleter(sCPUToPackage);

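	// A CPU with SMT ID 0 is the first logical processor of its core, and one
	// with both SMT and core ID 0 is the first logical processor of its
	// package; counting them yields the number of cores and packages.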
	coreCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0)
			coreCount++;
	}

	packageCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0
			&& gCPU[i].topology_id[CPU_TOPOLOGY_CORE] == 0) {
			packageCount++;
		}
	}

	const cpu_topology_node* root = get_cpu_topology();
	traverse_topology_tree(root, 0, 0);

	cpuToCoreDeleter.Detach();
	cpuToPackageDeleter.Detach();
	return B_OK;
}


static status_t
init()
{
	// create logical processor to core and package mappings
	int32 cpuCount, coreCount, packageCount;
	status_t result = build_topology_mappings(cpuCount, coreCount,
		packageCount);
	if (result != B_OK)
		return result;

	// disable parts of the scheduler logic that are not needed
	gSingleCore = coreCount == 1;
	scheduler_update_policy();

	gCoreCount = coreCount;
	gPackageCount = packageCount;

	gCPUEntries = new(std::nothrow) CPUEntry[cpuCount];
	if (gCPUEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CPUEntry> cpuEntriesDeleter(gCPUEntries);

	gCoreEntries = new(std::nothrow) CoreEntry[coreCount];
	if (gCoreEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CoreEntry> coreEntriesDeleter(gCoreEntries);

	gPackageEntries = new(std::nothrow) PackageEntry[packageCount];
	if (gPackageEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<PackageEntry> packageEntriesDeleter(gPackageEntries);

	new(&gCoreLoadHeap) CoreLoadHeap(coreCount);
	new(&gCoreHighLoadHeap) CoreLoadHeap(coreCount);

	new(&gIdlePackageList) IdlePackageList;

	for (int32 i = 0; i < cpuCount; i++) {
		CoreEntry* core = &gCoreEntries[sCPUToCore[i]];
		PackageEntry* package = &gPackageEntries[sCPUToPackage[i]];

		package->Init(sCPUToPackage[i]);
		core->Init(sCPUToCore[i], package);
		gCPUEntries[i].Init(i, core);

		core->AddCPU(&gCPUEntries[i]);
	}

	packageEntriesDeleter.Detach();
	coreEntriesDeleter.Detach();
	cpuEntriesDeleter.Detach();

	return B_OK;
}


void
scheduler_init()
{
	int32 cpuCount = smp_get_num_cpus();
	dprintf("scheduler_init: found %" B_PRId32 " logical cpu%s and %" B_PRId32
		" cache level%s\n", cpuCount, cpuCount != 1 ? "s" : "",
		gCPUCacheLevelCount, gCPUCacheLevelCount != 1 ? "s" : "");

#ifdef SCHEDULER_PROFILING
	Profiling::Profiler::Initialize();
#endif

	status_t result = init();
	if (result != B_OK)
		panic("scheduler_init: failed to initialize scheduler\n");

	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);

	init_debug_commands();

#if SCHEDULER_TRACING
	add_debugger_command_etc("scheduler", &cmd_scheduler,
		"Analyze scheduler tracing information",
		"<thread>\n"
		"Analyzes scheduler tracing information for a given thread.\n"
		"  <thread>  - ID of the thread.\n", 0);
#endif
}


void
scheduler_enable_scheduling()
{
	sSchedulerEnabled = true;
}


void
scheduler_update_policy()
{
	gTrackCPULoad = increase_cpu_performance(0) == B_OK;
	gTrackCoreLoad = !gSingleCore || gTrackCPULoad;
	dprintf("scheduler switches: single core: %s, cpu load tracking: %s,"
		" core load tracking: %s\n", gSingleCore ? "true" : "false",
		gTrackCPULoad ? "true" : "false",
		gTrackCoreLoad ? "true" : "false");
}


// #pragma mark - SchedulerListener


SchedulerListener::~SchedulerListener()
{
}


// #pragma mark - kernel private


/*!	Add the given scheduler listener. Thread lock must be held.
*/
void
scheduler_add_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Add(listener);
}


/*!	Remove the given scheduler listener. Thread lock must be held.
*/
void
scheduler_remove_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Remove(listener);
}


// #pragma mark - Syscalls


bigtime_t
_user_estimate_max_scheduling_latency(thread_id id)
{
	syscall_64_bit_return_value();

	// get the thread
	Thread* thread;
	if (id < 0) {
		thread = thread_get_current_thread();
		thread->AcquireReference();
	} else {
		thread = Thread::Get(id);
		if (thread == NULL)
			return 0;
	}
	BReference<Thread> threadReference(thread, true);

#ifdef SCHEDULER_PROFILING
	InterruptsLocker _;
#endif

	ThreadData* threadData = thread->scheduler_data;
	CoreEntry* core = threadData->Core();
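	// The thread has not been assigned to a core yet (it may never have run);
	// base the estimate on an arbitrary, randomly chosen core instead.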
	if (core == NULL)
		core = &gCoreEntries[get_random<int32>() % gCoreCount];

	int32 threadCount = core->ThreadCount();
	if (core->CPUCount() > 0)
		threadCount /= core->CPUCount();

	if (threadData->GetEffectivePriority() > 0) {
		threadCount -= threadCount * THREAD_MAX_SET_PRIORITY
				/ threadData->GetEffectivePriority();
	}

	return std::min(std::max(threadCount * gCurrentMode->base_quantum,
			gCurrentMode->minimal_quantum),
		gCurrentMode->maximum_latency);
}


status_t
_user_set_scheduler_mode(int32 mode)
{
	scheduler_mode schedulerMode = static_cast<scheduler_mode>(mode);
	status_t error = scheduler_set_operation_mode(schedulerMode);
	if (error == B_OK)
		cpu_set_scheduler_mode(schedulerMode);
	return error;
}


int32
_user_get_scheduler_mode()
{
	return gCurrentModeID;
}