/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_THREAD_H
#define KERNEL_SCHEDULER_THREAD_H


#include <algorithm>

#include <thread.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"

namespace Scheduler {


struct ThreadData : public DoublyLinkedListLinkImpl<ThreadData>,
	RunQueueLinkImpl<ThreadData> {
private:
	inline	void		_InitBase();

	inline	int32		_GetMinimalPriority() const;

	inline	CoreEntry*	_ChooseCore() const;
	inline	CPUEntry*	_ChooseCPU(CoreEntry* core,
							bool& rescheduleNeeded) const;

public:
						ThreadData(Thread* thread);

			void		Init();
			void		Init(CoreEntry* core);

			void		Dump() const;

	inline	int32		GetPriority() const	{ return fThread->priority; }
	inline	Thread*		GetThread() const	{ return fThread; }

	inline	bool		IsRealTime() const;
	inline	bool		IsIdle() const;

	inline	bool		HasCacheExpired() const;
	inline	CoreEntry*	Rebalance() const;

	inline	int32		GetEffectivePriority() const;

	inline	void		StartCPUTime();
	inline	void		StopCPUTime();

	inline	void		CancelPenalty();
	inline	bool		ShouldCancelPenalty() const;

			bool		ChooseCoreAndCPU(CoreEntry*& targetCore,
							CPUEntry*& targetCPU);

	inline	void		SetLastInterruptTime(bigtime_t interruptTime)
							{ fLastInterruptTime = interruptTime; }
	inline	void		SetStolenInterruptTime(bigtime_t interruptTime);

			bigtime_t	ComputeQuantum() const;
	inline	bigtime_t	GetQuantumLeft();
	inline	void		StartQuantum();
	inline	bool		HasQuantumEnded(bool wasPreempted, bool hasYielded);

	inline	void		Continues();
	inline	void		GoesAway();
	inline	void		Dies();

	inline	bigtime_t	WentSleep() const	{ return fWentSleep; }
	inline	bigtime_t	WentSleepActive() const	{ return fWentSleepActive; }

	inline	void		PutBack();
	inline	void		Enqueue(bool& wasRunQueueEmpty);
	inline	bool		Dequeue();

	inline	void		UpdateActivity(bigtime_t active);

	inline	bool		IsEnqueued() const	{ return fEnqueued; }
	inline	void		SetDequeued()	{ fEnqueued = false; }

	inline	int32		GetLoad() const	{ return fNeededLoad; }

	inline	CoreEntry*	Core() const	{ return fCore; }
			void		UnassignCore(bool running = false);

	static	void		ComputeQuantumLengths();

private:
	inline	void		_IncreasePenalty();
	inline	int32		_GetPenalty() const;

			void		_ComputeNeededLoad();

			void		_ComputeEffectivePriority() const;

	static	bigtime_t	_ScaleQuantum(bigtime_t maxQuantum,
							bigtime_t minQuantum, int32 maxPriority,
							int32 minPriority, int32 priority);

			bigtime_t	fStolenTime;
			bigtime_t	fQuantumStart;
			bigtime_t	fLastInterruptTime;

			bigtime_t	fWentSleep;
			bigtime_t	fWentSleepActive;

			bool		fEnqueued;
			bool		fReady;

			Thread*		fThread;

			int32		fPriorityPenalty;
			int32		fAdditionalPenalty;

	mutable	int32		fEffectivePriority;
	mutable	bigtime_t	fBaseQuantum;

			bigtime_t	fTimeUsed;

			bigtime_t	fMeasureAvailableActiveTime;
			bigtime_t	fMeasureAvailableTime;
			bigtime_t	fLastMeasureAvailableTime;

			int32		fNeededLoad;
			uint32		fLoadMeasurementEpoch;

			CoreEntry*	fCore;
};


class ThreadProcessing {
public:
	virtual				~ThreadProcessing();

	virtual	void		operator()(ThreadData* thread) = 0;
};


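// The lowest priority the penalty mechanism may drop this thread to: one
// fifth of its base priority, clamped to [B_LOWEST_ACTIVE_PRIORITY, 25].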
inline int32
ThreadData::_GetMinimalPriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	const int32 kDivisor = 5;

	const int32 kMaximalPriority = 25;
	const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;

	int32 priority = GetPriority() / kDivisor;
	return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}


inline bool
ThreadData::IsRealTime() const
{
	return GetPriority() >= B_FIRST_REAL_TIME_PRIORITY;
}


inline bool
ThreadData::IsIdle() const
{
	return GetPriority() == B_IDLE_PRIORITY;
}


inline bool
ThreadData::HasCacheExpired() const
{
	SCHEDULER_ENTER_FUNCTION();
	return gCurrentMode->has_cache_expired(this);
}


inline CoreEntry*
ThreadData::Rebalance() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->rebalance(this);
}


inline int32
ThreadData::GetEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fEffectivePriority;
}


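// Increases the priority penalty by one, unless that would push the thread
// below its minimal priority. Idle and real-time threads are never penalized.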
inline void
ThreadData::_IncreasePenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle() || IsRealTime())
		return;

	TRACE("increasing thread %ld penalty\n", fThread->id);

	int32 oldPenalty = fPriorityPenalty++;
	const int32 kMinimalPriority = _GetMinimalPriority();
	if (GetPriority() - oldPenalty <= kMinimalPriority)
		fPriorityPenalty = oldPenalty;

	_ComputeEffectivePriority();
}


inline void
ThreadData::StartCPUTime()
{
	SCHEDULER_ENTER_FUNCTION();

	SpinLocker threadTimeLocker(fThread->time_lock);
	fThread->last_time = system_time();
}


inline void
ThreadData::StopCPUTime()
{
	SCHEDULER_ENTER_FUNCTION();

	// User time is tracked in thread_at_kernel_entry()
	SpinLocker threadTimeLocker(fThread->time_lock);
	fThread->kernel_time += system_time() - fThread->last_time;
	fThread->last_time = 0;
	threadTimeLocker.Unlock();

	// If the old thread's team has user time timers, check them now.
	Team* team = fThread->team;
	SpinLocker teamTimeLocker(team->time_lock);
	if (team->HasActiveUserTimeUserTimers())
		user_timer_check_team_user_timers(team);
}


inline void
ThreadData::CancelPenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPenalty = fPriorityPenalty;
	fPriorityPenalty = 0;

	if (oldPenalty != 0) {
		TRACE("cancelling thread %ld penalty\n", fThread->id);
		_ComputeEffectivePriority();
	}
}


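// The penalty is only worth cancelling if the thread has a core assigned and
// has been asleep for more than half of the current mode's base quantum.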
inline bool
ThreadData::ShouldCancelPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCore == NULL)
		return false;
	return system_time() - fWentSleep > gCurrentMode->base_quantum / 2;
}


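// Time spent handling interrupts since the last snapshot is accounted as
// stolen from this thread's quantum; GetQuantumLeft() pays it back later.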
inline void
ThreadData::SetStolenInterruptTime(bigtime_t interruptTime)
{
	SCHEDULER_ENTER_FUNCTION();

	interruptTime -= fLastInterruptTime;
	fStolenTime += interruptTime;
}


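// Returns how much of the quantum remains, giving back up to one minimal
// quantum of stolen time. The result is never shorter than the minimal
// quantum.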
inline bigtime_t
ThreadData::GetQuantumLeft()
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t stolenTime = std::min(fStolenTime, gCurrentMode->minimal_quantum);
	ASSERT(stolenTime >= 0);
	fStolenTime -= stolenTime;

	bigtime_t quantum = ComputeQuantum() - fTimeUsed;
	quantum += stolenTime;
	quantum = std::max(quantum, gCurrentMode->minimal_quantum);

	return quantum;
}


inline void
ThreadData::StartQuantum()
{
	SCHEDULER_ENTER_FUNCTION();
	fQuantumStart = system_time();
}


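// Accounts the time used since StartQuantum(). The quantum ends if the thread
// yielded, was preempted, or has too little time left for another useful
// slice; in that case the remainder is carried over as stolen time and the
// priority penalty is increased.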
inline bool
ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t timeUsed = system_time() - fQuantumStart;
	ASSERT(timeUsed >= 0);
	fTimeUsed += timeUsed;

	bigtime_t timeLeft = ComputeQuantum() - fTimeUsed;
	timeLeft = std::max(bigtime_t(0), timeLeft);

	// Too little time left; it is better to make the next quantum a bit longer.
	bigtime_t skipTime = gCurrentMode->minimal_quantum / 2;
	if (hasYielded || wasPreempted || timeLeft <= skipTime) {
		fStolenTime += timeLeft;
		timeLeft = 0;
	}

	if (timeLeft == 0) {
		fAdditionalPenalty++;
		_IncreasePenalty();
		fTimeUsed = 0;
		return true;
	}

	return false;
}


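// The thread remains ready and keeps running; just refresh its load estimate
// if core load tracking is enabled.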
inline void
ThreadData::Continues()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		_ComputeNeededLoad();
}


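// The thread stops running and starts waiting: bump the additional penalty if
// its quantum was not used up, remember when it went to sleep and how much
// active time its core had accumulated, and remove its load from the core.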
inline void
ThreadData::GoesAway()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);

	if (!HasQuantumEnded(false, false)) {
		fAdditionalPenalty++;
		_ComputeEffectivePriority();
	}

	fLastInterruptTime = 0;

	fWentSleep = system_time();
	fWentSleepActive = fCore->GetActiveTime();

	if (gTrackCoreLoad)
		fLoadMeasurementEpoch = fCore->RemoveLoad(fNeededLoad, false);
	fReady = false;
}


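// The thread is dying: remove its load from the core and mark it not ready.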
inline void
ThreadData::Dies()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		fCore->RemoveLoad(fNeededLoad, true);
	fReady = false;
}


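// Reinserts the thread at the front of its run queue: the pinned CPU's queue
// if the thread is pinned to a CPU, otherwise the assigned core's queue.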
inline void
ThreadData::PutBack()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 priority = GetEffectivePriority();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		cpu->PushFront(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		fCore->PushFront(this, priority);
	}
}


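// Makes the thread ready and pushes it to the back of the pinned CPU's or the
// assigned core's run queue. If it was sleeping, its load is first added back
// to the core. wasRunQueueEmpty reports whether the queue held nothing but
// idle threads before the push.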
inline void
ThreadData::Enqueue(bool& wasRunQueueEmpty)
{
	SCHEDULER_ENTER_FUNCTION();

	if (!fReady) {
		if (gTrackCoreLoad) {
			bigtime_t timeSlept = system_time() - fWentSleep;
			bool updateLoad = timeSlept > 0;

			fCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, !updateLoad);
			if (updateLoad) {
				fMeasureAvailableTime += timeSlept;
				_ComputeNeededLoad();
			}
		}

		fReady = true;
	}

	fThread->state = B_THREAD_READY;

	const int32 priority = GetEffectivePriority();
	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		ThreadData* top = cpu->PeekThread();
		wasRunQueueEmpty = (top == NULL || top->IsIdle());

		cpu->PushBack(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		ThreadData* top = fCore->PeekThread();
		wasRunQueueEmpty = (top == NULL || top->IsIdle());

		fCore->PushBack(this, priority);
	}
}


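// Removes the thread from its run queue (pinned CPU or assigned core);
// returns false if it was not enqueued anymore.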
inline bool
ThreadData::Dequeue()
{
	SCHEDULER_ENTER_FUNCTION();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		if (!fEnqueued)
			return false;
		cpu->Remove(this);
		ASSERT(!fEnqueued);
		return true;
	}

	CoreRunQueueLocker _(fCore);
	if (!fEnqueued)
		return false;

	fCore->Remove(this);
	ASSERT(!fEnqueued);
	return true;
}


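// Adds the given running time to the measurements used for this thread's load
// estimate; a no-op when core load tracking is disabled.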
inline void
ThreadData::UpdateActivity(bigtime_t active)
{
	SCHEDULER_ENTER_FUNCTION();

	if (!gTrackCoreLoad)
		return;

	fMeasureAvailableTime += active;
	fMeasureAvailableActiveTime += active;
}


}	// namespace Scheduler


#endif	// KERNEL_SCHEDULER_THREAD_H