/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */

#include "scheduler_thread.h"


using namespace Scheduler;


static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];

const int32 kMaximumQuantumLengthsCount	= 20;
static bigtime_t sMaximumQuantumLengths[kMaximumQuantumLengthsCount];


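// Reset the bookkeeping shared by both Init() variants: time accounting,
// sleep tracking, penalties and the base quantum for the current priority.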
void
ThreadData::_InitBase()
{
	fStolenTime = 0;
	fQuantumStart = 0;
	fLastInterruptTime = 0;

	fWentSleep = 0;
	fWentSleepActive = 0;

	fEnqueued = false;
	fReady = false;

	fPriorityPenalty = 0;
	fAdditionalPenalty = 0;

	fEffectivePriority = GetPriority();
	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];

	fTimeUsed = 0;

	fMeasureAvailableActiveTime = 0;
	fLastMeasureAvailableTime = 0;
	fMeasureAvailableTime = 0;
}


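// Let the active scheduler mode pick a core for this thread; only called
// on systems with more than one core.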
inline CoreEntry*
ThreadData::_ChooseCore() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->choose_core(this);
}


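// Pick a CPU on the given core. The thread's previous CPU is preferred, to
// reuse a possibly warm cache, if it still belongs to this core and runs
// something of lower priority; otherwise the lowest-priority CPU is taken
// from the core's heap. rescheduleNeeded is set when the chosen CPU has to
// be preempted.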
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
	SCHEDULER_ENTER_FUNCTION();

	int32 threadPriority = GetEffectivePriority();

	if (fThread->previous_cpu != NULL) {
		CPUEntry* previousCPU
			= CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);
		if (previousCPU->Core() == core && !fThread->previous_cpu->disabled) {
			CoreCPUHeapLocker _(core);
			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
				previousCPU->UpdatePriority(threadPriority);
				rescheduleNeeded = true;
				return previousCPU;
			}
		}
	}

	CoreCPUHeapLocker _(core);
	CPUEntry* cpu = core->CPUHeap()->PeekRoot();
	ASSERT(cpu != NULL);

	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
		cpu->UpdatePriority(threadPriority);
		rescheduleNeeded = true;
	} else
		rescheduleNeeded = false;

	return cpu;
}


ThreadData::ThreadData(Thread* thread)
	:
	fThread(thread)
{
}


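// Initialize the scheduling data of a newly created thread. The load
// estimate and, for non-real-time threads, the priority penalties are
// inherited from the creating (current) thread.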
void
ThreadData::Init()
{
	_InitBase();
	fCore = NULL;

	Thread* currentThread = thread_get_current_thread();
	ThreadData* currentThreadData = currentThread->scheduler_data;
	fNeededLoad = currentThreadData->fNeededLoad;

	if (!IsRealTime()) {
		fPriorityPenalty = std::min(currentThreadData->fPriorityPenalty,
				std::max(GetPriority() - _GetMinimalPriority(), int32(0)));
		fAdditionalPenalty = currentThreadData->fAdditionalPenalty;

		_ComputeEffectivePriority();
	}
}


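// Initialize the scheduling data of a thread that is already bound to the
// given core and starts out ready, with no load recorded yet.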
void
ThreadData::Init(CoreEntry* core)
{
	_InitBase();

	fCore = core;
	fReady = true;
	fNeededLoad = 0;
}


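// Print this thread's scheduler state to the kernel debugger.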
void
ThreadData::Dump() const
{
	kprintf("\tpriority_penalty:\t%" B_PRId32 "\n", fPriorityPenalty);

	int32 priority = GetPriority() - _GetPenalty();
	priority = std::max(priority, int32(1));
	kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
		fAdditionalPenalty % priority, fAdditionalPenalty);
	kprintf("\teffective_priority:\t%" B_PRId32 "\n", GetEffectivePriority());

	kprintf("\ttime_used:\t\t%" B_PRId64 " us (quantum: %" B_PRId64 " us)\n",
		fTimeUsed, ComputeQuantum());
	kprintf("\tstolen_time:\t\t%" B_PRId64 " us\n", fStolenTime);
	kprintf("\tquantum_start:\t\t%" B_PRId64 " us\n", fQuantumStart);
	kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
	kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
	kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
	kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
		fCore != NULL ? fCore->ID() : -1);
	if (fCore != NULL && HasCacheExpired())
		kprintf("\tcache affinity has expired\n");
}


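// Fill in whichever of targetCore and targetCPU is still NULL, move the
// thread's load estimate to the new core if the core changes, and return
// whether the chosen CPU needs to reschedule.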
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
	SCHEDULER_ENTER_FUNCTION();

	bool rescheduleNeeded = false;

	if (targetCore == NULL && targetCPU != NULL)
		targetCore = targetCPU->Core();
	else if (targetCore != NULL && targetCPU == NULL)
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	else if (targetCore == NULL && targetCPU == NULL) {
		targetCore = _ChooseCore();
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	}

	ASSERT(targetCore != NULL);
	ASSERT(targetCPU != NULL);

	if (fCore != targetCore) {
		fLoadMeasurementEpoch = targetCore->LoadMeasurementEpoch() - 1;
		if (fReady) {
			if (fCore != NULL)
				fCore->RemoveLoad(fNeededLoad, true);
			targetCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, true);
		}
	}

	fCore = targetCore;
	return rescheduleNeeded;
}


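// Compute the time slice for the next run. Real-time threads always get
// their full base quantum; for other threads the quantum is capped based
// on the number of threads per CPU on this core so that the overall
// latency stays bounded.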
bigtime_t
ThreadData::ComputeQuantum() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsRealTime())
		return fBaseQuantum;

	int32 threadCount = fCore->ThreadCount();
	if (fCore->CPUCount() > 0)
		threadCount /= fCore->CPUCount();

	bigtime_t quantum = fBaseQuantum;
	if (threadCount < kMaximumQuantumLengthsCount)
		quantum = std::min(sMaximumQuantumLengths[threadCount], quantum);
	return quantum;
}


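// Detach the thread from its current core. A running or ready thread is
// marked not ready first; the core pointer is cleared only once the thread
// is no longer ready.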
void
ThreadData::UnassignCore(bool running)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCore != NULL);
	if (running || fThread->state == B_THREAD_READY)
		fReady = false;
	if (!fReady)
		fCore = NULL;
}


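// Precompute the quantum tables for the current scheduler mode: a base
// quantum for each priority, interpolated between the mode's quantum
// multipliers, and a maximum quantum for each per-CPU thread count,
// derived from the mode's maximum latency.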
/* static */ void
ThreadData::ComputeQuantumLengths()
{
	SCHEDULER_ENTER_FUNCTION();

	for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
		const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
		if (priority >= B_URGENT_DISPLAY_PRIORITY) {
			sQuantumLengths[priority] = kQuantum0;
			continue;
		}

		const bigtime_t kQuantum1
			= kQuantum0 * gCurrentMode->quantum_multipliers[0];
		if (priority > B_NORMAL_PRIORITY) {
			sQuantumLengths[priority] = _ScaleQuantum(kQuantum1, kQuantum0,
				B_URGENT_DISPLAY_PRIORITY, B_NORMAL_PRIORITY, priority);
			continue;
		}

		const bigtime_t kQuantum2
			= kQuantum0 * gCurrentMode->quantum_multipliers[1];
		sQuantumLengths[priority] = _ScaleQuantum(kQuantum2, kQuantum1,
			B_NORMAL_PRIORITY, B_IDLE_PRIORITY, priority);
	}

	for (int32 threadCount = 0; threadCount < kMaximumQuantumLengthsCount;
		threadCount++) {

		bigtime_t quantum = gCurrentMode->maximum_latency;
		if (threadCount != 0)
			quantum /= threadCount;
		quantum = std::max(quantum, gCurrentMode->minimal_quantum);
		sMaximumQuantumLengths[threadCount] = quantum;
	}
}


inline int32
ThreadData::_GetPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fPriorityPenalty;
}


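// Recompute fNeededLoad from the active time measured since the last
// update and, if the estimate changed, apply the difference to the core's
// load.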
void
ThreadData::_ComputeNeededLoad()
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(!IsIdle());

	int32 oldLoad = compute_load(fLastMeasureAvailableTime,
		fMeasureAvailableActiveTime, fNeededLoad, fMeasureAvailableTime);
	if (oldLoad < 0 || oldLoad == fNeededLoad)
		return;

	fCore->ChangeLoad(fNeededLoad - oldLoad);
}


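// Derive the effective priority: idle and real-time threads keep their
// static value, all other threads have the accumulated penalties
// subtracted. The base quantum is refreshed to match the result.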
void
ThreadData::_ComputeEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle())
		fEffectivePriority = B_IDLE_PRIORITY;
	else if (IsRealTime())
		fEffectivePriority = GetPriority();
	else {
		fEffectivePriority = GetPriority();
		fEffectivePriority -= _GetPenalty();
		if (fEffectivePriority > 0)
			fEffectivePriority -= fAdditionalPenalty % fEffectivePriority;

		ASSERT(fEffectivePriority < B_FIRST_REAL_TIME_PRIORITY);
		ASSERT(fEffectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
	}

	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
}


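// Linearly interpolate between the two quantum lengths: returns maxQuantum
// at minPriority and minQuantum at maxPriority.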
/* static */ bigtime_t
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
	int32 maxPriority, int32 minPriority, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(priority <= maxPriority);
	ASSERT(priority >= minPriority);

	bigtime_t result = (maxQuantum - minQuantum) * (priority - minPriority);
	result /= maxPriority - minPriority;
	return maxQuantum - result;
}


ThreadProcessing::~ThreadProcessing()
{
}
