1/*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 *	processor.c: processor and processor_set manipulation routines.
61 */
62
63#include <mach/boolean.h>
64#include <mach/policy.h>
65#include <mach/processor.h>
66#include <mach/processor_info.h>
67#include <mach/vm_param.h>
68#include <kern/cpu_number.h>
69#include <kern/host.h>
70#include <kern/machine.h>
71#include <kern/misc_protos.h>
72#include <kern/processor.h>
73#include <kern/sched.h>
74#include <kern/task.h>
75#include <kern/thread.h>
76#include <kern/ipc_host.h>
77#include <kern/ipc_tt.h>
78#include <ipc/ipc_port.h>
79#include <kern/kalloc.h>
80
81/*
82 * Exported interface
83 */
84#include <mach/mach_host_server.h>
85#include <mach/processor_set_server.h>
86
/* the default (and only) processor set, and the static node it lives on */
struct processor_set	pset0;
struct pset_node		pset_node0;
decl_simple_lock_data(static,pset_node_lock)

/* global registries of all tasks and threads, guarded by tasks_threads_lock */
queue_head_t			tasks;
int						tasks_count;
queue_head_t			threads;
int						threads_count;
decl_mutex_data(,tasks_threads_lock)

/* singly-linked list of all processors, guarded by processor_list_lock */
processor_t				processor_list;
unsigned int			processor_count;
static processor_t		processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

/* processors available; reported as pset0's processor count (see processor_set_info) */
uint32_t				processor_avail_count;

processor_t	master_processor;
int 		master_cpu = 0;		/* boot cpu number */

/* Forwards */
kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);
113
/*
 *	processor_bootstrap:
 *
 *	One-time startup initialization of the processor and
 *	processor-set machinery: sets up pset0 and its node, the
 *	global task/thread lists and their locks, and initializes
 *	the master (boot) processor.
 */
void
processor_bootstrap(void)
{
	/* pset0 is the only pset; hang it off the root node */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* global lists of all tasks and threads */
	mutex_init(&tasks_threads_lock, 0);
	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* the boot cpu becomes the master processor, assigned to pset0 */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
132
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_num, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_num,
	processor_set_t		pset)
{
	run_queue_init(&processor->runq);

	/* processor comes up off-line with no threads attached */
	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->cpu_num = cpu_num;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->deadline = UINT64_MAX;	/* no real-time deadline pending */
	processor->timeslice = 0;
	processor->processor_self = IP_NULL;	/* IPC port created lazily (see processor_start) */
	simple_lock_init(&processor->lock, 0);
	processor_data_init(processor);
	processor->processor_list = NULL;

	/* append to the global singly-linked processor list */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
168
169processor_set_t
170processor_pset(
171	processor_t	processor)
172{
173	return (processor->processor_set);
174}
175
176pset_node_t
177pset_node_root(void)
178{
179	return &pset_node0;
180}
181
182processor_set_t
183pset_create(
184	pset_node_t			node)
185{
186	processor_set_t		*prev, pset = kalloc(sizeof (*pset));
187
188	if (pset != PROCESSOR_SET_NULL) {
189		pset_init(pset, node);
190
191		simple_lock(&pset_node_lock);
192
193		prev = &node->psets;
194		while (*prev != PROCESSOR_SET_NULL)
195			prev = &(*prev)->pset_list;
196
197		*prev = pset;
198
199		simple_unlock(&pset_node_lock);
200	}
201
202	return (pset);
203}
204
205/*
206 *	Initialize the given processor_set structure.
207 */
208void
209pset_init(
210	processor_set_t		pset,
211	pset_node_t			node)
212{
213	queue_init(&pset->active_queue);
214	queue_init(&pset->idle_queue);
215	pset->processor_count = 0;
216	pset->low_pri = pset->low_count = PROCESSOR_NULL;
217	pset_lock_init(pset);
218	pset->pset_self = IP_NULL;
219	pset->pset_name_self = IP_NULL;
220	pset->pset_list = PROCESSOR_SET_NULL;
221	pset->node = node;
222}
223
224kern_return_t
225processor_info_count(
226	processor_flavor_t		flavor,
227	mach_msg_type_number_t	*count)
228{
229	switch (flavor) {
230
231	case PROCESSOR_BASIC_INFO:
232		*count = PROCESSOR_BASIC_INFO_COUNT;
233		break;
234
235	case PROCESSOR_CPU_LOAD_INFO:
236		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
237		break;
238
239	default:
240		return (cpu_info_count(flavor, count));
241	}
242
243	return (KERN_SUCCESS);
244}
245
246
247kern_return_t
248processor_info(
249	register processor_t	processor,
250	processor_flavor_t		flavor,
251	host_t					*host,
252	processor_info_t		info,
253	mach_msg_type_number_t	*count)
254{
255	register int	cpu_num, state;
256	kern_return_t	result;
257
258	if (processor == PROCESSOR_NULL)
259		return (KERN_INVALID_ARGUMENT);
260
261	cpu_num = processor->cpu_num;
262
263	switch (flavor) {
264
265	case PROCESSOR_BASIC_INFO:
266	{
267		register processor_basic_info_t		basic_info;
268
269		if (*count < PROCESSOR_BASIC_INFO_COUNT)
270			return (KERN_FAILURE);
271
272		basic_info = (processor_basic_info_t) info;
273		basic_info->cpu_type = slot_type(cpu_num);
274		basic_info->cpu_subtype = slot_subtype(cpu_num);
275		state = processor->state;
276		if (state == PROCESSOR_OFF_LINE)
277			basic_info->running = FALSE;
278		else
279			basic_info->running = TRUE;
280		basic_info->slot_num = cpu_num;
281		if (processor == master_processor)
282			basic_info->is_master = TRUE;
283		else
284			basic_info->is_master = FALSE;
285
286		*count = PROCESSOR_BASIC_INFO_COUNT;
287		*host = &realhost;
288
289	    return (KERN_SUCCESS);
290	}
291
292	case PROCESSOR_CPU_LOAD_INFO:
293	{
294		register processor_cpu_load_info_t	cpu_load_info;
295
296	    if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
297			return (KERN_FAILURE);
298
299	    cpu_load_info = (processor_cpu_load_info_t) info;
300		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
301							timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval;
302		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
303							timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval;
304		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
305							timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval;
306		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
307
308	    *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
309	    *host = &realhost;
310
311	    return (KERN_SUCCESS);
312	}
313
314	default:
315	    result = cpu_info(flavor, cpu_num, info, count);
316	    if (result == KERN_SUCCESS)
317			*host = &realhost;
318
319	    return (result);
320	}
321}
322
/*
 *	processor_start:
 *
 *	Bring a processor on line: create its idle thread (and,
 *	if it has never run, a dedicated start-up thread), then
 *	ask the machine layer to start the cpu.  On any failure
 *	the processor is returned to PROCESSOR_OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 *	Master processor is special-cased: bind the calling
	 *	thread to it, start the cpu directly, then restore
	 *	the previous binding.
	 */
	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_num);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	/* only an off-line processor may be started */
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	/* mark in-transition; undone on every failure path below */
	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		/* bind the start-up thread so it runs first on this processor */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* drop the create reference; processor->next_thread now holds it */
		thread_deallocate(thread);
	}

	/* create the processor's IPC port on first start */
	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_num);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
425
426kern_return_t
427processor_exit(
428	processor_t	processor)
429{
430	if (processor == PROCESSOR_NULL)
431		return(KERN_INVALID_ARGUMENT);
432
433	return(processor_shutdown(processor));
434}
435
436kern_return_t
437processor_control(
438	processor_t		processor,
439	processor_info_t	info,
440	mach_msg_type_number_t	count)
441{
442	if (processor == PROCESSOR_NULL)
443		return(KERN_INVALID_ARGUMENT);
444
445	return(cpu_control(processor->cpu_num, info, count));
446}
447
448kern_return_t
449processor_set_create(
450	__unused host_t		host,
451	__unused processor_set_t	*new_set,
452	__unused processor_set_t	*new_name)
453{
454	return(KERN_FAILURE);
455}
456
457kern_return_t
458processor_set_destroy(
459	__unused processor_set_t	pset)
460{
461	return(KERN_FAILURE);
462}
463
464kern_return_t
465processor_get_assignment(
466	processor_t	processor,
467	processor_set_t	*pset)
468{
469	int state;
470
471	state = processor->state;
472	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
473		return(KERN_FAILURE);
474
475	*pset = &pset0;
476
477	return(KERN_SUCCESS);
478}
479
480kern_return_t
481processor_set_info(
482	processor_set_t		pset,
483	int			flavor,
484	host_t			*host,
485	processor_set_info_t	info,
486	mach_msg_type_number_t	*count)
487{
488	if (pset == PROCESSOR_SET_NULL)
489		return(KERN_INVALID_ARGUMENT);
490
491	if (flavor == PROCESSOR_SET_BASIC_INFO) {
492		register processor_set_basic_info_t	basic_info;
493
494		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
495			return(KERN_FAILURE);
496
497		basic_info = (processor_set_basic_info_t) info;
498		basic_info->processor_count = processor_avail_count;
499		basic_info->default_policy = POLICY_TIMESHARE;
500
501		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
502		*host = &realhost;
503		return(KERN_SUCCESS);
504	}
505	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
506		register policy_timeshare_base_t	ts_base;
507
508		if (*count < POLICY_TIMESHARE_BASE_COUNT)
509			return(KERN_FAILURE);
510
511		ts_base = (policy_timeshare_base_t) info;
512		ts_base->base_priority = BASEPRI_DEFAULT;
513
514		*count = POLICY_TIMESHARE_BASE_COUNT;
515		*host = &realhost;
516		return(KERN_SUCCESS);
517	}
518	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
519		register policy_fifo_base_t		fifo_base;
520
521		if (*count < POLICY_FIFO_BASE_COUNT)
522			return(KERN_FAILURE);
523
524		fifo_base = (policy_fifo_base_t) info;
525		fifo_base->base_priority = BASEPRI_DEFAULT;
526
527		*count = POLICY_FIFO_BASE_COUNT;
528		*host = &realhost;
529		return(KERN_SUCCESS);
530	}
531	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
532		register policy_rr_base_t		rr_base;
533
534		if (*count < POLICY_RR_BASE_COUNT)
535			return(KERN_FAILURE);
536
537		rr_base = (policy_rr_base_t) info;
538		rr_base->base_priority = BASEPRI_DEFAULT;
539		rr_base->quantum = 1;
540
541		*count = POLICY_RR_BASE_COUNT;
542		*host = &realhost;
543		return(KERN_SUCCESS);
544	}
545	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
546		register policy_timeshare_limit_t	ts_limit;
547
548		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
549			return(KERN_FAILURE);
550
551		ts_limit = (policy_timeshare_limit_t) info;
552		ts_limit->max_priority = MAXPRI_KERNEL;
553
554		*count = POLICY_TIMESHARE_LIMIT_COUNT;
555		*host = &realhost;
556		return(KERN_SUCCESS);
557	}
558	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
559		register policy_fifo_limit_t		fifo_limit;
560
561		if (*count < POLICY_FIFO_LIMIT_COUNT)
562			return(KERN_FAILURE);
563
564		fifo_limit = (policy_fifo_limit_t) info;
565		fifo_limit->max_priority = MAXPRI_KERNEL;
566
567		*count = POLICY_FIFO_LIMIT_COUNT;
568		*host = &realhost;
569		return(KERN_SUCCESS);
570	}
571	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
572		register policy_rr_limit_t		rr_limit;
573
574		if (*count < POLICY_RR_LIMIT_COUNT)
575			return(KERN_FAILURE);
576
577		rr_limit = (policy_rr_limit_t) info;
578		rr_limit->max_priority = MAXPRI_KERNEL;
579
580		*count = POLICY_RR_LIMIT_COUNT;
581		*host = &realhost;
582		return(KERN_SUCCESS);
583	}
584	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
585		register int				*enabled;
586
587		if (*count < (sizeof(*enabled)/sizeof(int)))
588			return(KERN_FAILURE);
589
590		enabled = (int *) info;
591		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
592
593		*count = sizeof(*enabled)/sizeof(int);
594		*host = &realhost;
595		return(KERN_SUCCESS);
596	}
597
598
599	*host = HOST_NULL;
600	return(KERN_INVALID_ARGUMENT);
601}
602
603/*
604 *	processor_set_statistics
605 *
606 *	Returns scheduling statistics for a processor set.
607 */
608kern_return_t
609processor_set_statistics(
610	processor_set_t         pset,
611	int                     flavor,
612	processor_set_info_t    info,
613	mach_msg_type_number_t	*count)
614{
615	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
616		return (KERN_INVALID_PROCESSOR_SET);
617
618	if (flavor == PROCESSOR_SET_LOAD_INFO) {
619		register processor_set_load_info_t     load_info;
620
621		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
622			return(KERN_FAILURE);
623
624		load_info = (processor_set_load_info_t) info;
625
626		load_info->mach_factor = sched_mach_factor;
627		load_info->load_average = sched_load_average;
628
629		load_info->task_count = tasks_count;
630		load_info->thread_count = threads_count;
631
632		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
633		return(KERN_SUCCESS);
634	}
635
636	return(KERN_INVALID_ARGUMENT);
637}
638
639/*
640 *	processor_set_max_priority:
641 *
642 *	Specify max priority permitted on processor set.  This affects
643 *	newly created and assigned threads.  Optionally change existing
644 * 	ones.
645 */
646kern_return_t
647processor_set_max_priority(
648	__unused processor_set_t	pset,
649	__unused int			max_priority,
650	__unused boolean_t		change_threads)
651{
652	return (KERN_INVALID_ARGUMENT);
653}
654
655/*
656 *	processor_set_policy_enable:
657 *
658 *	Allow indicated policy on processor set.
659 */
660
661kern_return_t
662processor_set_policy_enable(
663	__unused processor_set_t	pset,
664	__unused int			policy)
665{
666	return (KERN_INVALID_ARGUMENT);
667}
668
669/*
670 *	processor_set_policy_disable:
671 *
672 *	Forbid indicated policy on processor set.  Time sharing cannot
673 *	be forbidden.
674 */
675kern_return_t
676processor_set_policy_disable(
677	__unused processor_set_t	pset,
678	__unused int			policy,
679	__unused boolean_t		change_threads)
680{
681	return (KERN_INVALID_ARGUMENT);
682}
683
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *	Builds an array of ports for every task (or thread) in
 *	the system; the caller receives the array in *thing_list
 *	and its length in *count.  Ownership of the kalloc'd
 *	array transfers to the caller.
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	/* only the default pset is supported */
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 *	Allocate-then-recheck loop: the count can change while
	 *	unlocked for allocation, so repeat until the buffer is
	 *	large enough while the list lock is held.
	 */
	for (;;) {
		mutex_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		/* take a reference on each task while the list is locked */
		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
								task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* on secure kernels the kernel task is not exposed */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
								thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	mutex_unlock(&tasks_threads_lock);

	/* actual can be < maxthings when entries were skipped above */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* undo the references taken above before bailing out */
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */
		/* NOTE(review): convert_*_to_port presumably consumes the
		   reference taken above — confirm against ipc_tt */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
			  	(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
857
858
859/*
860 *	processor_set_tasks:
861 *
862 *	List all tasks in the processor set.
863 */
864kern_return_t
865processor_set_tasks(
866	processor_set_t		pset,
867	task_array_t		*task_list,
868	mach_msg_type_number_t	*count)
869{
870    return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
871}
872
873/*
874 *	processor_set_threads:
875 *
876 *	List all threads in the processor set.
877 */
878#if defined(SECURE_KERNEL)
879kern_return_t
880processor_set_threads(
881	__unused processor_set_t		pset,
882	__unused thread_array_t		*thread_list,
883	__unused mach_msg_type_number_t	*count)
884{
885    return KERN_FAILURE;
886}
887#elif defined(CONFIG_EMBEDDED)
888kern_return_t
889processor_set_threads(
890	__unused processor_set_t		pset,
891	__unused thread_array_t		*thread_list,
892	__unused mach_msg_type_number_t	*count)
893{
894    return KERN_NOT_SUPPORTED;
895}
896#else
897kern_return_t
898processor_set_threads(
899	processor_set_t		pset,
900	thread_array_t		*thread_list,
901	mach_msg_type_number_t	*count)
902{
903    return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
904}
905#endif
906
907/*
908 *	processor_set_policy_control
909 *
910 *	Controls the scheduling attributes governing the processor set.
911 *	Allows control of enabled policies, and per-policy base and limit
912 *	priorities.
913 */
914kern_return_t
915processor_set_policy_control(
916	__unused processor_set_t		pset,
917	__unused int				flavor,
918	__unused processor_set_info_t	policy_info,
919	__unused mach_msg_type_number_t	count,
920	__unused boolean_t			change)
921{
922	return (KERN_INVALID_ARGUMENT);
923}
924
/*
 *	pset_deallocate:
 *
 *	No-op: processor-set reference counting is not implemented
 *	here (pset0 is statically allocated).  The #undef strips
 *	any macro form so an out-of-line function can be defined.
 */
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}
933
/*
 *	pset_reference:
 *
 *	No-op counterpart to pset_deallocate; see above pattern.
 *	The #undef strips any macro form so an out-of-line
 *	function can be defined.
 */
#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}
942