1/*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 *	processor.c: processor and processor_set manipulation routines.
61 */
62
63#include <mach/boolean.h>
64#include <mach/policy.h>
65#include <mach/processor.h>
66#include <mach/processor_info.h>
67#include <mach/vm_param.h>
68#include <kern/cpu_number.h>
69#include <kern/host.h>
70#include <kern/machine.h>
71#include <kern/misc_protos.h>
72#include <kern/processor.h>
73#include <kern/sched.h>
74#include <kern/task.h>
75#include <kern/thread.h>
76#include <kern/ipc_host.h>
77#include <kern/ipc_tt.h>
78#include <ipc/ipc_port.h>
79#include <kern/kalloc.h>
80
81/*
82 * Exported interface
83 */
84#include <mach/mach_host_server.h>
85#include <mach/processor_set_server.h>
86
/* The bootstrap processor set and the pset node that contains it. */
struct processor_set	pset0;
struct pset_node		pset_node0;
/* Guards the list of psets hanging off each pset node. */
decl_simple_lock_data(static,pset_node_lock)

/* System-wide task and thread lists, guarded by tasks_threads_lock. */
queue_head_t			tasks;
queue_head_t			terminated_tasks;	/* To be used ONLY for stackshot. */
int						tasks_count;
queue_head_t			threads;
int						threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

/* Singly-linked list of all processors, guarded by processor_list_lock. */
processor_t				processor_list;
unsigned int			processor_count;
static processor_t		processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

/* Count of processors available for scheduling (maintained outside this file). */
uint32_t				processor_avail_count;

processor_t		master_processor;	/* the boot processor */
int 			master_cpu = 0;
boolean_t		sched_stats_active = FALSE;	/* presumably toggled when scheduler statistics gathering is enabled */

/* Forwards */
kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
		int			type);
115
/*
 *	processor_bootstrap:
 *
 *	Set up the bootstrap processor set (pset0) and its node
 *	(pset_node0), the global task/thread queues and their locks,
 *	and initialize the master (boot) processor.  Called once,
 *	early in startup, before any other processor exists.
 */
void
processor_bootstrap(void)
{
	/* pset0 must be initialized before it is linked onto the node. */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* Global queues of all tasks and threads in the system. */
	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* The boot cpu becomes the master processor, assigned to pset0. */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
134
135/*
136 *	Initialize the given processor for the cpu
137 *	indicated by cpu_id, and assign to the
138 *	specified processor set.
139 */
140void
141processor_init(
142	processor_t			processor,
143	int					cpu_id,
144	processor_set_t		pset)
145{
146	spl_t		s;
147
148	if (processor != master_processor) {
149		/* Scheduler state deferred until sched_init() */
150		SCHED(processor_init)(processor);
151	}
152
153	processor->state = PROCESSOR_OFF_LINE;
154	processor->active_thread = THREAD_NULL;
155    processor->next_thread = THREAD_NULL;
156    processor->idle_thread = THREAD_NULL;
157	processor->processor_set = pset;
158	processor->current_pri = MINPRI;
159	processor->current_thmode = TH_MODE_NONE;
160	processor->cpu_id = cpu_id;
161	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
162	processor->deadline = UINT64_MAX;
163	processor->timeslice = 0;
164	processor->processor_meta = PROCESSOR_META_NULL;
165	processor->processor_self = IP_NULL;
166	processor_data_init(processor);
167	processor->processor_list = NULL;
168
169	s = splsched();
170	pset_lock(pset);
171	if (pset->cpu_set_count++ == 0)
172		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
173	else {
174		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
175		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
176	}
177	pset_unlock(pset);
178	splx(s);
179
180	simple_lock(&processor_list_lock);
181	if (processor_list == NULL)
182		processor_list = processor;
183	else
184		processor_list_tail->processor_list = processor;
185	processor_list_tail = processor;
186	processor_count++;
187	simple_unlock(&processor_list_lock);
188}
189
/*
 *	processor_meta_init:
 *
 *	Share the primary processor's meta structure (idle queue and
 *	primary designation) with another, related processor,
 *	allocating the structure on first use.
 *
 *	NOTE(review): the kalloc() result is not checked for NULL
 *	before use, and primary->processor_meta is never stored here
 *	directly -- presumably the first call passes
 *	processor == primary so the assignment below covers it;
 *	confirm against callers.
 */
void
processor_meta_init(
	processor_t		processor,
	processor_t		primary)
{
	processor_meta_t	pmeta = primary->processor_meta;

	if (pmeta == PROCESSOR_META_NULL) {
		pmeta = kalloc(sizeof (*pmeta));

		queue_init(&pmeta->idle_queue);

		pmeta->primary = primary;
	}

	processor->processor_meta = pmeta;
}
207
208processor_set_t
209processor_pset(
210	processor_t	processor)
211{
212	return (processor->processor_set);
213}
214
215pset_node_t
216pset_node_root(void)
217{
218	return &pset_node0;
219}
220
221processor_set_t
222pset_create(
223	pset_node_t			node)
224{
225	processor_set_t		*prev, pset = kalloc(sizeof (*pset));
226
227	if (pset != PROCESSOR_SET_NULL) {
228		pset_init(pset, node);
229
230		simple_lock(&pset_node_lock);
231
232		prev = &node->psets;
233		while (*prev != PROCESSOR_SET_NULL)
234			prev = &(*prev)->pset_list;
235
236		*prev = pset;
237
238		simple_unlock(&pset_node_lock);
239	}
240
241	return (pset);
242}
243
244/*
245 *	Initialize the given processor_set structure.
246 */
247void
248pset_init(
249	processor_set_t		pset,
250	pset_node_t			node)
251{
252	if (pset != &pset0) {
253		/* Scheduler state deferred until sched_init() */
254		SCHED(pset_init)(pset);
255	}
256
257	queue_init(&pset->active_queue);
258	queue_init(&pset->idle_queue);
259	pset->online_processor_count = 0;
260	pset_pri_init_hint(pset, PROCESSOR_NULL);
261	pset_count_init_hint(pset, PROCESSOR_NULL);
262	pset->cpu_set_low = pset->cpu_set_hi = 0;
263	pset->cpu_set_count = 0;
264	pset_lock_init(pset);
265	pset->pset_self = IP_NULL;
266	pset->pset_name_self = IP_NULL;
267	pset->pset_list = PROCESSOR_SET_NULL;
268	pset->node = node;
269}
270
271kern_return_t
272processor_info_count(
273	processor_flavor_t		flavor,
274	mach_msg_type_number_t	*count)
275{
276	switch (flavor) {
277
278	case PROCESSOR_BASIC_INFO:
279		*count = PROCESSOR_BASIC_INFO_COUNT;
280		break;
281
282	case PROCESSOR_CPU_LOAD_INFO:
283		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
284		break;
285
286	default:
287		return (cpu_info_count(flavor, count));
288	}
289
290	return (KERN_SUCCESS);
291}
292
293
/*
 *	processor_info:
 *
 *	Return information about the given processor in the
 *	requested flavor.  Flavors not handled here are forwarded
 *	to the machine-dependent cpu_info().  On success *host is
 *	set to the real host and *count to the amount of data
 *	returned in info; KERN_FAILURE means the caller's buffer
 *	was too small.
 */
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/* Any state other than OFF_LINE counts as running. */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

	    return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_data_t	idle_temp;
		timer_t		idle_state;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		/*
		 * Without precise user/kernel accounting the two timers
		 * cannot be separated: report the combined total as USER
		 * and zero as SYSTEM.
		 */
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		/*
		 * Take an unlocked snapshot of the idle timer.  If the
		 * processor is idling right now (current_state points at
		 * the idle timer) and the timer did not tick during the
		 * copy, the accumulated value appears to be stale since
		 * idle began: advance the *snapshot* by the time elapsed
		 * since its last timestamp rather than the live timer.
		 * Otherwise just report the accumulated idle time.
		 */
		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_temp = *idle_state;

		if (PROCESSOR_DATA(processor, current_state) != idle_state ||
		    timer_grab(&idle_temp) != timer_grab(idle_state)) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		} else {
			timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
		}

		/* The kernel keeps no separate "nice" accounting. */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

	    *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
	    *host = &realhost;

	    return (KERN_SUCCESS);
	}

	default:
	    result = cpu_info(flavor, cpu_id, info, count);
	    if (result == KERN_SUCCESS)
			*host = &realhost;

	    return (result);
	}
}
392
/*
 *	processor_start:
 *
 *	Bring a processor on line.  The master processor only needs
 *	cpu_start(); for any other processor the idle thread, an
 *	initial start-up thread (on the very first start), and the
 *	IPC port are created first.  The OFF_LINE -> START state
 *	transition, made under the pset lock, claims the processor
 *	against concurrent start attempts; any failure rolls the
 *	state back to OFF_LINE so a later start can retry.
 */
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 * Start the master processor with the calling thread bound
	 * to it, so cpu_start() executes on that cpu; restore the
	 * previous binding afterwards.
	 */
	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	/* Only an OFF_LINE processor may be started; claim it. */
	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* Roll back the claim so the start can be retried. */
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		/*
		 * Bind the start-up thread to the processor and hand it
		 * over as next_thread -- the first thing the cpu will run.
		 */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		/* Drop the creation reference; next_thread keeps its own. */
		thread_deallocate(thread);
	}

	/* Create the processor's control port on first start. */
	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
495
496kern_return_t
497processor_exit(
498	processor_t	processor)
499{
500	if (processor == PROCESSOR_NULL)
501		return(KERN_INVALID_ARGUMENT);
502
503	return(processor_shutdown(processor));
504}
505
506kern_return_t
507processor_control(
508	processor_t		processor,
509	processor_info_t	info,
510	mach_msg_type_number_t	count)
511{
512	if (processor == PROCESSOR_NULL)
513		return(KERN_INVALID_ARGUMENT);
514
515	return(cpu_control(processor->cpu_id, info, count));
516}
517
518kern_return_t
519processor_set_create(
520	__unused host_t		host,
521	__unused processor_set_t	*new_set,
522	__unused processor_set_t	*new_name)
523{
524	return(KERN_FAILURE);
525}
526
527kern_return_t
528processor_set_destroy(
529	__unused processor_set_t	pset)
530{
531	return(KERN_FAILURE);
532}
533
534kern_return_t
535processor_get_assignment(
536	processor_t	processor,
537	processor_set_t	*pset)
538{
539	int state;
540
541	if (processor == PROCESSOR_NULL)
542		return(KERN_INVALID_ARGUMENT);
543
544	state = processor->state;
545	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
546		return(KERN_FAILURE);
547
548	*pset = &pset0;
549
550	return(KERN_SUCCESS);
551}
552
553kern_return_t
554processor_set_info(
555	processor_set_t		pset,
556	int			flavor,
557	host_t			*host,
558	processor_set_info_t	info,
559	mach_msg_type_number_t	*count)
560{
561	if (pset == PROCESSOR_SET_NULL)
562		return(KERN_INVALID_ARGUMENT);
563
564	if (flavor == PROCESSOR_SET_BASIC_INFO) {
565		register processor_set_basic_info_t	basic_info;
566
567		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
568			return(KERN_FAILURE);
569
570		basic_info = (processor_set_basic_info_t) info;
571		basic_info->processor_count = processor_avail_count;
572		basic_info->default_policy = POLICY_TIMESHARE;
573
574		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
575		*host = &realhost;
576		return(KERN_SUCCESS);
577	}
578	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
579		register policy_timeshare_base_t	ts_base;
580
581		if (*count < POLICY_TIMESHARE_BASE_COUNT)
582			return(KERN_FAILURE);
583
584		ts_base = (policy_timeshare_base_t) info;
585		ts_base->base_priority = BASEPRI_DEFAULT;
586
587		*count = POLICY_TIMESHARE_BASE_COUNT;
588		*host = &realhost;
589		return(KERN_SUCCESS);
590	}
591	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
592		register policy_fifo_base_t		fifo_base;
593
594		if (*count < POLICY_FIFO_BASE_COUNT)
595			return(KERN_FAILURE);
596
597		fifo_base = (policy_fifo_base_t) info;
598		fifo_base->base_priority = BASEPRI_DEFAULT;
599
600		*count = POLICY_FIFO_BASE_COUNT;
601		*host = &realhost;
602		return(KERN_SUCCESS);
603	}
604	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
605		register policy_rr_base_t		rr_base;
606
607		if (*count < POLICY_RR_BASE_COUNT)
608			return(KERN_FAILURE);
609
610		rr_base = (policy_rr_base_t) info;
611		rr_base->base_priority = BASEPRI_DEFAULT;
612		rr_base->quantum = 1;
613
614		*count = POLICY_RR_BASE_COUNT;
615		*host = &realhost;
616		return(KERN_SUCCESS);
617	}
618	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
619		register policy_timeshare_limit_t	ts_limit;
620
621		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
622			return(KERN_FAILURE);
623
624		ts_limit = (policy_timeshare_limit_t) info;
625		ts_limit->max_priority = MAXPRI_KERNEL;
626
627		*count = POLICY_TIMESHARE_LIMIT_COUNT;
628		*host = &realhost;
629		return(KERN_SUCCESS);
630	}
631	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
632		register policy_fifo_limit_t		fifo_limit;
633
634		if (*count < POLICY_FIFO_LIMIT_COUNT)
635			return(KERN_FAILURE);
636
637		fifo_limit = (policy_fifo_limit_t) info;
638		fifo_limit->max_priority = MAXPRI_KERNEL;
639
640		*count = POLICY_FIFO_LIMIT_COUNT;
641		*host = &realhost;
642		return(KERN_SUCCESS);
643	}
644	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
645		register policy_rr_limit_t		rr_limit;
646
647		if (*count < POLICY_RR_LIMIT_COUNT)
648			return(KERN_FAILURE);
649
650		rr_limit = (policy_rr_limit_t) info;
651		rr_limit->max_priority = MAXPRI_KERNEL;
652
653		*count = POLICY_RR_LIMIT_COUNT;
654		*host = &realhost;
655		return(KERN_SUCCESS);
656	}
657	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
658		register int				*enabled;
659
660		if (*count < (sizeof(*enabled)/sizeof(int)))
661			return(KERN_FAILURE);
662
663		enabled = (int *) info;
664		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;
665
666		*count = sizeof(*enabled)/sizeof(int);
667		*host = &realhost;
668		return(KERN_SUCCESS);
669	}
670
671
672	*host = HOST_NULL;
673	return(KERN_INVALID_ARGUMENT);
674}
675
676/*
677 *	processor_set_statistics
678 *
679 *	Returns scheduling statistics for a processor set.
680 */
681kern_return_t
682processor_set_statistics(
683	processor_set_t         pset,
684	int                     flavor,
685	processor_set_info_t    info,
686	mach_msg_type_number_t	*count)
687{
688	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
689		return (KERN_INVALID_PROCESSOR_SET);
690
691	if (flavor == PROCESSOR_SET_LOAD_INFO) {
692		register processor_set_load_info_t     load_info;
693
694		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
695			return(KERN_FAILURE);
696
697		load_info = (processor_set_load_info_t) info;
698
699		load_info->mach_factor = sched_mach_factor;
700		load_info->load_average = sched_load_average;
701
702		load_info->task_count = tasks_count;
703		load_info->thread_count = threads_count;
704
705		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
706		return(KERN_SUCCESS);
707	}
708
709	return(KERN_INVALID_ARGUMENT);
710}
711
712/*
713 *	processor_set_max_priority:
714 *
715 *	Specify max priority permitted on processor set.  This affects
716 *	newly created and assigned threads.  Optionally change existing
717 * 	ones.
718 */
719kern_return_t
720processor_set_max_priority(
721	__unused processor_set_t	pset,
722	__unused int			max_priority,
723	__unused boolean_t		change_threads)
724{
725	return (KERN_INVALID_ARGUMENT);
726}
727
728/*
729 *	processor_set_policy_enable:
730 *
731 *	Allow indicated policy on processor set.
732 */
733
734kern_return_t
735processor_set_policy_enable(
736	__unused processor_set_t	pset,
737	__unused int			policy)
738{
739	return (KERN_INVALID_ARGUMENT);
740}
741
742/*
743 *	processor_set_policy_disable:
744 *
745 *	Forbid indicated policy on processor set.  Time sharing cannot
746 *	be forbidden.
747 */
748kern_return_t
749processor_set_policy_disable(
750	__unused processor_set_t	pset,
751	__unused int			policy,
752	__unused boolean_t		change_threads)
753{
754	return (KERN_INVALID_ARGUMENT);
755}
756
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 *
 *	Builds a kalloc'd array of referenced tasks or threads and
 *	converts it in place into an array of ports for the caller.
 *	Only the bootstrap pset is a valid argument.  Ownership of
 *	the returned buffer passes to the caller (MIG-style).
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	/*
	 * Size the buffer under the lock but allocate unlocked;
	 * loop in case the task/thread population grew while the
	 * lock was dropped.
	 *
	 * NOTE(review): sizing by sizeof (mach_port_t) relies on
	 * task_t/thread_t pointers being the same size as ports,
	 * since the same buffer is converted in place below.
	 */
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	/* Gather the things, taking a reference on each one. */
	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
								task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* Secure kernels never expose the kernel task. */
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
								thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	lck_mtx_unlock(&tasks_threads_lock);

	/* The population may have shrunk since the buffer was sized. */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Cannot shrink: drop every reference taken above. */
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		/*
		 * Convert in place; the convert_*_to_port calls
		 * presumably consume the references taken above --
		 * standard Mach convention, confirm in ipc_tt.c.
		 */
		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
			  	(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
930
931
932/*
933 *	processor_set_tasks:
934 *
935 *	List all tasks in the processor set.
936 */
937kern_return_t
938processor_set_tasks(
939	processor_set_t		pset,
940	task_array_t		*task_list,
941	mach_msg_type_number_t	*count)
942{
943    return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
944}
945
946/*
947 *	processor_set_threads:
948 *
949 *	List all threads in the processor set.
950 */
951#if defined(SECURE_KERNEL)
952kern_return_t
953processor_set_threads(
954	__unused processor_set_t		pset,
955	__unused thread_array_t		*thread_list,
956	__unused mach_msg_type_number_t	*count)
957{
958    return KERN_FAILURE;
959}
960#elif defined(CONFIG_EMBEDDED)
961kern_return_t
962processor_set_threads(
963	__unused processor_set_t		pset,
964	__unused thread_array_t		*thread_list,
965	__unused mach_msg_type_number_t	*count)
966{
967    return KERN_NOT_SUPPORTED;
968}
969#else
970kern_return_t
971processor_set_threads(
972	processor_set_t		pset,
973	thread_array_t		*thread_list,
974	mach_msg_type_number_t	*count)
975{
976    return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
977}
978#endif
979
980/*
981 *	processor_set_policy_control
982 *
983 *	Controls the scheduling attributes governing the processor set.
984 *	Allows control of enabled policies, and per-policy base and limit
985 *	priorities.
986 */
987kern_return_t
988processor_set_policy_control(
989	__unused processor_set_t		pset,
990	__unused int				flavor,
991	__unused processor_set_info_t	policy_info,
992	__unused mach_msg_type_number_t	count,
993	__unused boolean_t			change)
994{
995	return (KERN_INVALID_ARGUMENT);
996}
997
/*
 *	pset_deallocate:
 *
 *	Processor sets are never destroyed (only pset0 and
 *	pset_create() sets exist, all permanent), so releasing a
 *	reference is a no-op.  The #undef strips a macro version
 *	of the name (presumably from a header) so a real function
 *	can be provided for external callers.
 */
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}
1006
/*
 *	pset_reference:
 *
 *	Counterpart to pset_deallocate(): since processor sets are
 *	permanent, taking a reference is a no-op.  The #undef strips
 *	a macro version of the name (presumably from a header) so a
 *	real function can be provided for external callers.
 */
#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}
1015