/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
#	include <i386/mp.h>
#else
// fall back on declaring it extern.  The linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#if 0
#pragma mark **** thread binding ****
#endif
/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum.  If the thread is the current thread, this method will
 * force a thread_block().  The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() &&
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}
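
#if 0
/*
 * Illustrative sketch (not compiled): how a client might migrate the
 * calling thread to CPU 0, do some per-CPU work, and unbind when done.
 * The cpu index and the 0 options value are assumptions for the example.
 */
static void
chud_bind_example(void)
{
	// on success, chudxnu_bind_thread() returns with us already on CPU 0
	if(chudxnu_bind_thread(current_thread(), 0, 0) == KERN_SUCCESS) {
		/* ... per-CPU work here ... */
		(void)chudxnu_unbind_thread(current_thread(), 0);
	}
}
#endif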

__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}

__private_extern__ int
chudxnu_thread_get_scheduler_state(thread_t thread) {
	/*
	 * Instantaneous snapshot of the scheduler state of
	 * a given thread.
	 *
	 * MUST ONLY be called on an interrupted or
	 * locked thread, to avoid a race.
	 */

	int state = 0;
	int schedulerState = (volatile int)(thread->state);
	processor_t lastProcessor = (volatile processor_t)(thread->last_processor);

	if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
		state |= CHUDXNU_TS_RUNNING;
	}

	if (schedulerState & TH_RUN) {
		state |= CHUDXNU_TS_RUNNABLE;
	}

	if (schedulerState & TH_WAIT) {
		state |= CHUDXNU_TS_WAIT;
	}

	if (schedulerState & TH_UNINT) {
		state |= CHUDXNU_TS_UNINT;
	}

	if (schedulerState & TH_SUSP) {
		state |= CHUDXNU_TS_SUSP;
	}

	if (schedulerState & TH_TERMINATE) {
		state |= CHUDXNU_TS_TERMINATE;
	}

	if (schedulerState & TH_IDLE) {
		state |= CHUDXNU_TS_IDLE;
	}

	return state;
}
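
#if 0
/*
 * Illustrative sketch (not compiled): decoding the snapshot returned by
 * chudxnu_thread_get_scheduler_state().  As the comment above requires,
 * the target thread must be interrupted or locked when this is called.
 */
static boolean_t
chud_thread_is_blocked_example(thread_t thread)
{
	int state = chudxnu_thread_get_scheduler_state(thread);

	// waiting and not currently executing on any processor
	return (state & CHUDXNU_TS_WAIT) && !(state & CHUDXNU_TS_RUNNING);
}
#endif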

#if 0
#pragma mark **** task and thread info ****
#endif

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

#define THING_TASK		0
#define THING_THREAD	1

// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}
	/* OK, have memory and tasks_threads_lock is held */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
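
/*
 * Note: every task or thread in the list returned above carries a reference
 * taken via task_reference_internal()/thread_reference_internal(); callers
 * drop them with chudxnu_free_task_list()/chudxnu_free_thread_list() below.
 */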

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t      *threads_out,
	mach_msg_type_number_t  *count)
{
	mach_msg_type_number_t	actual;
	thread_t				*thread_list;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}


__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
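
#if 0
/*
 * Illustrative sketch (not compiled): walking every task in the system.
 * chudxnu_all_tasks() returns referenced tasks, so the list must be
 * released with chudxnu_free_task_list() when done.
 */
static void
chud_walk_tasks_example(void)
{
	task_array_t tasks = NULL;
	mach_msg_type_number_t count = 0;

	if(chudxnu_all_tasks(&tasks, &count) == KERN_SUCCESS) {
		mach_msg_type_number_t i;
		for(i = 0; i < count; i++) {
			/* ... inspect tasks[i] ... */
		}
		(void)chudxnu_free_task_list(&tasks, &count);
	}
}
#endif
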
__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t task,
	thread_array_t *thread_list,
	mach_msg_type_number_t *count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t	*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
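
#if 0
/*
 * Illustrative sketch (not compiled): enumerating the threads of one task.
 * The returned threads are referenced and must be released with
 * chudxnu_free_thread_list().
 */
static void
chud_walk_task_threads_example(task_t task)
{
	thread_array_t threads = NULL;
	mach_msg_type_number_t count = 0;

	if(chudxnu_task_threads(task, &threads, &count) == KERN_SUCCESS) {
		mach_msg_type_number_t i;
		for(i = 0; i < count; i++) {
			/* ... inspect threads[i] ... */
		}
		(void)chudxnu_free_thread_list(&threads, &count);
	}
}
#endif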

__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t thread,
	thread_flavor_t flavor,
	thread_info_t thread_info_out,
	mach_msg_type_number_t *thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}


/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if(thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if(thread) {
		if(new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}
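
#if 0
/*
 * Illustrative sketch (not compiled): using the atomic mark to claim a
 * thread exactly once.  Because chudxnu_thread_set_marked() returns the
 * previous value, only one caller can observe FALSE for a given thread.
 */
static boolean_t
chud_claim_thread_example(thread_t thread)
{
	// TRUE from set_marked means somebody else marked it first
	return !chudxnu_thread_set_marked(thread, TRUE);
}
#endif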

/* XXX: good thing this code is experimental... */

/* external handler */
extern void (*chudxnu_thread_ast_handler)(thread_t);
void (*chudxnu_thread_ast_handler)(thread_t) = NULL;

/* AST callback to dispatch to AppleProfile */
extern void chudxnu_thread_ast(thread_t);
void
chudxnu_thread_ast(thread_t thread)
{
	/* load the handler once, for atomicity with respect to kdebug events */
	void (*handler)(thread_t) = chudxnu_thread_ast_handler;
	if( handler )
		handler( thread );

	// clear all CHUD bits now that the AST has been serviced
	thread->t_chud = 0;
}


/* Get and set bits on the thread and trigger an AST handler */
void chudxnu_set_thread_ast( thread_t thread );
void
chudxnu_set_thread_ast( thread_t thread )
{
	/* FIXME: only call this on the current thread from an interrupt handler for now... */
	if( thread != current_thread() )
		panic( "unsafe AST set" );

	act_set_kperf(thread);
}
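
#if 0
/*
 * Illustrative sketch (not compiled): how a profiling client might hook
 * the AST path.  The handler and install-function names are assumptions
 * for the example; the real client (AppleProfile) installs its own.
 */
static void
example_ast_handler(thread_t thread)
{
	/* ... sample the thread here ... */
}

static void
chud_install_ast_example(void)
{
	chudxnu_thread_ast_handler = example_ast_handler;
	// later, from an interrupt handler on the current thread:
	// chudxnu_set_thread_ast(current_thread());
}
#endif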

/* get and set the thread bits */
extern uint32_t chudxnu_get_thread_bits( thread_t thread );
extern void chudxnu_set_thread_bits( thread_t thread, uint32_t bits );

uint32_t
chudxnu_get_thread_bits( thread_t thread )
{
	return thread->t_chud;
}

void
chudxnu_set_thread_bits( thread_t thread, uint32_t bits )
{
	thread->t_chud = bits;
}

/* Get and set thread dirty bits, so CHUD can track whether a thread
 * has been dispatched since it last looked.  The caller must hold the
 * thread lock.
 */
boolean_t
chudxnu_thread_get_dirty(thread_t thread)
{
	return (thread->c_switch != thread->chud_c_switch);
}

void
chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
{
	if( makedirty )
		thread->chud_c_switch = thread->c_switch - 1;
	else
		thread->chud_c_switch = thread->c_switch;
}
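
#if 0
/*
 * Illustrative sketch (not compiled): polling a thread's dirty bit to see
 * whether it has run since the last look.  As noted above, the caller
 * must hold the thread lock around both calls.
 */
static boolean_t
chud_poll_thread_example(thread_t thread)
{
	boolean_t ran = chudxnu_thread_get_dirty(thread);

	// reset so the next poll detects only new dispatches
	chudxnu_thread_set_dirty(thread, FALSE);
	return ran;
}
#endif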