/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

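/* The default processor set and the root pset node. */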
struct processor_set	pset0;
struct pset_node		pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t			tasks;
queue_head_t			terminated_tasks;	/* To be used ONLY for stackshot. */
int						tasks_count;
int						terminated_tasks_count;
queue_head_t			threads;
int						threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

processor_t				processor_list;
unsigned int			processor_count;
static processor_t		processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t				processor_avail_count;

processor_t		master_processor;
int 			master_cpu = 0;
boolean_t		sched_stats_active = FALSE;

/* Forwards */
kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);

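/*
 *	Bootstrap the processor/pset subsystem: initialize the default
 *	processor set and node, the global task and thread queues, and
 *	the master processor.
 */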
void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}

/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign it to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state deferred until sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = FALSE;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	s = splsched();
	pset_lock(pset);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}

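/*
 *	Record the SMT relationship between a (possibly secondary)
 *	processor and its primary.
 */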
void
processor_set_primary(
	processor_t		processor,
	processor_t		primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust the primary pointer for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link the primary to the secondary; this assumes a 2-way
		 * SMT model.  We'll need to move to a queue if any future
		 * architecture requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = TRUE;
		processor->is_SMT = TRUE;
	}
}

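/*
 *	Return the processor set to which a processor is currently assigned.
 */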
processor_set_t
processor_pset(
	processor_t	processor)
{
	return (processor->processor_set);
}

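/*
 *	Return the root of the pset node tree.
 */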
pset_node_t
pset_node_root(void)
{
	return &pset_node0;
}

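/*
 *	Allocate and initialize a new processor set, and chain it
 *	onto the given node's list of psets.
 */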
processor_set_t
pset_create(
	pset_node_t			node)
{
#if defined(CONFIG_SCHED_MULTIQ)
	/* multiq scheduler is not currently compatible with multiple psets */
	if (sched_groups_enabled)
		return processor_pset(master_processor);
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}

/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t			node)
{
	if (pset != &pset0) {
		/* Scheduler state deferred until sched_init() */
		SCHED(pset_init)(pset);
	}

	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	queue_init(&pset->idle_secondary_queue);
	pset->online_processor_count = 0;
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->pending_AST_cpu_mask = 0;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}

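/*
 *	Return the expected *count for the given processor_info flavor;
 *	unknown flavors are handled by the machine-dependent
 *	cpu_info_count().
 */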
kern_return_t
processor_info_count(
	processor_flavor_t		flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}

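/*
 *	Return information about a processor: basic identification,
 *	cumulative cpu-load ticks, or a machine-dependent flavor.
 */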
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each was last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether the values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
				   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}

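/*
 *	Bring a processor online.  For the master processor this binds
 *	the calling thread to it and calls cpu_start() directly; for
 *	other processors the idle thread and, on first start, a
 *	dedicated start-up thread are created before the cpu is started.
 */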
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start-up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}

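/*
 *	Take a processor offline.
 */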
kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (processor_shutdown(processor));
}

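/*
 *	Perform a machine-dependent control operation on a processor.
 */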
kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (cpu_control(processor->cpu_id, info, count));
}

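/*
 *	User-level creation and destruction of processor sets is not
 *	supported; both stubs fail unconditionally.
 */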
kern_return_t
processor_set_create(
	__unused host_t		host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return (KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return (KERN_FAILURE);
}

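/*
 *	Return the processor set to which a processor is assigned;
 *	with a single pset this is always pset0.
 */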
kern_return_t
processor_get_assignment(
	processor_t	processor,
	processor_set_t	*pset)
{
	int state;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return (KERN_FAILURE);

	*pset = &pset0;

	return (KERN_SUCCESS);
}

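/*
 *	Return basic information or the per-policy scheduling defaults
 *	and limits for a processor set, selected by flavor.
 */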
kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return (KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return (KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return (KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return (KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return (KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return (KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int				*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return (KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return (KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return (KERN_INVALID_ARGUMENT);
}


/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t         pset,
	int                     flavor,
	processor_set_info_t    info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t     load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_max_priority:
 *
 *	Specify the maximum priority permitted on a processor set.
 *	This affects newly created and assigned threads, and optionally
 *	changes existing ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow the indicated policy on the processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid the indicated policy on the processor set.  Time sharing
 *	cannot be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

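	/*
	 *	Size the buffer under the lock; if it is too small, drop the
	 *	lock, grow the allocation and retry, since the counts may
	 *	change while unlocked.
	 */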
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == NULL)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
								task = (task_t)queue_next(&task->tasks)) {
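			/* Secure kernels omit the kernel task from the list. */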
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
								thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, we must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == NULL) {
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that MIG should handle */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}

/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return (processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return (KERN_FAILURE);
}
#else
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return (processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif

/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t		pset,
	__unused int				flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t			change)
{
	return (KERN_INVALID_ARGUMENT);
}

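/*
 *	Processor sets are never destroyed, so the reference-counting
 *	entry points below are no-ops; the macro forms are #undef'd so
 *	that real, exported functions are emitted.
 */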
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}
