1/*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 *	host.c
61 *
62 *	Non-ipc host functions.
63 */
64
65#include <mach/mach_types.h>
66#include <mach/boolean.h>
67#include <mach/host_info.h>
68#include <mach/host_special_ports.h>
69#include <mach/kern_return.h>
70#include <mach/machine.h>
71#include <mach/port.h>
72#include <mach/processor_info.h>
73#include <mach/vm_param.h>
74#include <mach/processor.h>
75#include <mach/mach_host_server.h>
76#include <mach/host_priv_server.h>
77#include <mach/vm_map.h>
78
79#include <kern/kern_types.h>
80#include <kern/assert.h>
81#include <kern/kalloc.h>
82#include <kern/host.h>
83#include <kern/host_statistics.h>
84#include <kern/ipc_host.h>
85#include <kern/misc_protos.h>
86#include <kern/sched.h>
87#include <kern/processor.h>
88
89#include <vm/vm_map.h>
90
/* The single, statically allocated host object for this machine;
 * host_self()/host_priv_self() return pointers to it. */
host_data_t	realhost;

/* Aggregated external-modification statistics, reported to callers
 * via the HOST_EXTMOD_INFO64 flavor of host_statistics64() below.
 * (Presumably updated by VM/task code elsewhere — not visible here.) */
vm_extmod_statistics_data_t host_extmod_statistics;
94
/*
 *	host_processors:
 *
 *	Return an array containing a send right for every processor on
 *	this host.  Requires the privileged host port.  The out array is
 *	kalloc'd here; MIG/IPC is expected to consume it.
 */
kern_return_t
host_processors(
	host_priv_t				host_priv,
	processor_array_t		*out_array,
	mach_msg_type_number_t	*countp)
{
	register processor_t	processor, *tp;
	void					*addr;
	unsigned int			count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	/* Snapshot the count up front; the allocation and the list walk
	 * below are both sized by this value. */
	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* NOTE(review): the list head is read without holding
	 * processor_list_lock; presumably safe because the head entry is
	 * established at boot and never removed — confirm. */
	tp = (processor_t *) addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that Mig should handle */

	/* Convert each processor reference in place into a naked send
	 * right, so the array handed back is an array of ports. */
	tp = (processor_t *) addr;
	for (i = 0; i < count; i++)
		((mach_port_t *) tp)[i] =
		      (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
141
/*
 *	host_info:
 *
 *	Return general information about the host, selected by flavor.
 *	On entry *count is the size (in natural_t units) of the caller's
 *	buffer; on success it is updated to the amount actually filled.
 */
kern_return_t
host_info(
	host_t					host,
	host_flavor_t			flavor,
	host_info_t				info,
	mach_msg_type_number_t	*count)
{

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case HOST_BASIC_INFO:
	{
		register host_basic_info_t	basic_info;
		register int				master_id;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t) info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		/* CPU type/subtype are those of the master (boot) processor. */
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		/* Callers with a full-size buffer also get the extended
		 * fields; older callers get only the rev-0 layout. */
		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO:
	{
		register host_sched_info_t	sched_info;
		uint32_t quantum_time;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 *	Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t) info;

		/* Convert the scheduler's initial quantum from abstime
		 * units to milliseconds for both reported fields. */
		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout =
			sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES:
	{
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO:
	{
		register host_priority_info_t	priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t) info;

		/* Export the kernel's priority band boundaries. */
		priority_info->kernel_priority	= MINPRI_KERNEL;
		priority_info->system_priority	= MINPRI_KERNEL;
		priority_info->server_priority	= MINPRI_RESERVED;
		priority_info->user_priority	= BASEPRI_DEFAULT;
		priority_info->depress_priority	= DEPRESSPRI;
		priority_info->idle_priority	= IDLEPRI;
		priority_info->minimum_priority	= MINPRI_USER;
		priority_info->maximum_priority	= MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS:
	{
		/* Success with zero data simply indicates the facility exists. */
		*count = 0;
		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
265
/*
 *	host_statistics:
 *
 *	Return statistics about the host, selected by flavor.  On entry
 *	*count is the caller's buffer size (natural_t units); on success
 *	it is set to the amount filled in.
 */
kern_return_t
host_statistics(
	host_t					host,
	host_flavor_t			flavor,
	host_info_t				info,
	mach_msg_type_number_t	*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch(flavor) {

	case HOST_LOAD_INFO:
	{
		host_load_info_t	load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t) info;

		/* Copy the scheduler-maintained load-average and Mach
		 * factor arrays straight into the caller's buffer. */
		bcopy((char *) avenrun,
			  (char *) load_info->avenrun, sizeof avenrun);
		bcopy((char *) mach_factor,
			  (char *) load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO:
	{
		register processor_t		processor;
		register vm_statistics64_t	stat;
		vm_statistics64_data_t		host_vm_stat;
		vm_statistics_t			stat32;
		mach_msg_type_number_t		original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		/* Start from the list head's per-CPU counters (head read
		 * unlocked), then fold in every other processor's counters
		 * while holding the list lock. */
		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count +=	stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		/* Legacy 32-bit layout: clamp 64-bit counters to 32 bits. */
		stat32 = (vm_statistics_t) info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		/* Pages on per-CPU local queues are counted as active but
		 * are not yet reflected in vm_page_active_count. */
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl	*lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit*/

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO:
	{
		register processor_t	processor;
		host_cpu_load_info_t	cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

/* Accumulate a processor's abstime timer into the tick count
 * for the given CPU state. */
#define GET_TICKS_VALUE(processor, state, timer)			 \
MACRO_BEGIN								 \
	cpu_load_info->cpu_ticks[(state)] +=				 \
		(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer)) \
				/ hz_tick_interval);			 \
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_data_t	idle_temp;
			timer_t		idle_state;

			GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_temp = *idle_state;

			/* If the processor is no longer idling (or its idle
			 * timer advanced while we copied it), use the stored
			 * idle timer as-is; otherwise it is still idle, so
			 * credit the idle time accumulated up to "now". */
			if (PROCESSOR_DATA(processor, current_state) != idle_state ||
			    timer_grab(&idle_temp) != timer_grab(idle_state))
				GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
			else {
				timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

				cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
					(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO:
	{
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo = (task_power_info_t)info;

		/* Power/wakeup statistics accumulated from tasks that have
		 * already terminated. */
		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
465
466
/*
 *	host_statistics64:
 *
 *	64-bit variant of host_statistics().  Handles the 64-bit-native
 *	flavors here and forwards everything else to host_statistics().
 */
kern_return_t
host_statistics64(
	host_t				host,
	host_flavor_t			flavor,
	host_info64_t			info,
	mach_msg_type_number_t		*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch(flavor) {

		case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		{
			register processor_t		processor;
			register vm_statistics64_t	stat;
			vm_statistics64_data_t		host_vm_stat;

			if (*count < HOST_VM_INFO64_COUNT)
				return (KERN_FAILURE);

			/* Sum per-processor VM counters: list head read
			 * unlocked, remaining entries under the list lock
			 * (same pattern as HOST_VM_INFO above). */
			processor = processor_list;
			stat = &PROCESSOR_DATA(processor, vm_stat);
			host_vm_stat = *stat;

			if (processor_count > 1) {
				simple_lock(&processor_list_lock);

				while ((processor = processor->processor_list) != NULL) {
					stat = &PROCESSOR_DATA(processor, vm_stat);

					host_vm_stat.zero_fill_count +=	stat->zero_fill_count;
					host_vm_stat.reactivations += stat->reactivations;
					host_vm_stat.pageins += stat->pageins;
					host_vm_stat.pageouts += stat->pageouts;
					host_vm_stat.faults += stat->faults;
					host_vm_stat.cow_faults += stat->cow_faults;
					host_vm_stat.lookups += stat->lookups;
					host_vm_stat.hits += stat->hits;
				}

				simple_unlock(&processor_list_lock);
			}

			stat = (vm_statistics64_t) info;

			stat->free_count = vm_page_free_count + vm_page_speculative_count;
			stat->active_count = vm_page_active_count;

			/* Pages on per-CPU local queues count as active but
			 * are not included in vm_page_active_count. */
			if (vm_page_local_q) {
				for (i = 0; i < vm_page_local_q_count; i++) {
					struct vpl	*lq;

					lq = &vm_page_local_q[i].vpl_un.vpl;

					stat->active_count += lq->vpl_count;
				}
			}
			stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
			stat->wire_count = vm_page_wire_count;
#else
			stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
			stat->zero_fill_count = host_vm_stat.zero_fill_count;
			stat->reactivations = host_vm_stat.reactivations;
			stat->pageins = host_vm_stat.pageins;
			stat->pageouts = host_vm_stat.pageouts;
			stat->faults = host_vm_stat.faults;
			stat->cow_faults = host_vm_stat.cow_faults;
			stat->lookups = host_vm_stat.lookups;
			stat->hits = host_vm_stat.hits;

			/* rev1 added "purgable" info */
			stat->purgeable_count = vm_page_purgeable_count;
			stat->purges = vm_page_purged_count;

			/* rev2 added "speculative" info */
			stat->speculative_count = vm_page_speculative_count;

			*count = HOST_VM_INFO64_COUNT;

			return(KERN_SUCCESS);
		}

		case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
		{
			vm_extmod_statistics_t		out_extmod_statistics;

			if (*count < HOST_EXTMOD_INFO64_COUNT)
				return (KERN_FAILURE);

			/* Copy the global external-modification statistics. */
			out_extmod_statistics = (vm_extmod_statistics_t) info;
			*out_extmod_statistics = host_extmod_statistics;

			*count = HOST_EXTMOD_INFO64_COUNT;

			return(KERN_SUCCESS);
		}

		default: /* If we didn't recognize the flavor, send to host_statistics */
			return(host_statistics(host, flavor, (host_info_t) info, count));
	}
}
573
574
575/*
576 * Get host statistics that require privilege.
577 * None for now, just call the un-privileged version.
578 */
579kern_return_t
580host_priv_statistics(
581	host_priv_t		host_priv,
582	host_flavor_t		flavor,
583	host_info_t		info,
584	mach_msg_type_number_t	*count)
585{
586	return(host_statistics((host_t)host_priv, flavor, info, count));
587}
588
589kern_return_t
590set_sched_stats_active(
591		boolean_t active)
592{
593	sched_stats_active = active;
594	return KERN_SUCCESS;
595}
596
597
/*
 *	get_sched_statistics:
 *
 *	Copy per-processor scheduler statistics into the caller's buffer.
 *	The buffer receives one record per processor, followed by one
 *	record for the real-time run queue (ps_cpuid == -1) and one for
 *	the fair-share run queue (ps_cpuid == -2).  *count is the buffer
 *	size in bytes on entry and the bytes written on return.
 */
kern_return_t
get_sched_statistics(
		struct _processor_statistics_np *out,
		uint32_t *count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	simple_lock(&processor_list_lock);

	/* Require room for every processor plus the two queue records
	 * before writing anything. */
	if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
		simple_unlock(&processor_list_lock);
		return KERN_FAILURE;
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;

		out->ps_cpuid 			= processor->cpu_id;
		out->ps_csw_count 		= stats->csw_count;
		out->ps_preempt_count 		= stats->preempt_count;
		out->ps_preempted_rt_count 	= stats->preempted_rt_count;
		out->ps_preempted_by_rt_count 	= stats->preempted_by_rt_count;
		out->ps_rt_sched_count		= stats->rt_sched_count;
		out->ps_interrupt_count 	= stats->interrupt_count;
		out->ps_ipi_count 		= stats->ipi_count;
		out->ps_timer_pop_count 	= stats->timer_pop_count;
		out->ps_runq_count_sum 		= SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions	= stats->idle_transitions;
		out->ps_quantum_timer_expirations	= stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* The queue records below are written after dropping the lock;
	 * the size check above already guaranteed the space. */

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	/* And include Fair Share Queue information at the end */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-2);
	out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return KERN_SUCCESS;
}
656
657kern_return_t
658host_page_size(
659	host_t		host,
660	vm_size_t	*out_page_size)
661{
662	if (host == HOST_NULL)
663		return(KERN_INVALID_ARGUMENT);
664
665        *out_page_size = PAGE_SIZE;
666
667	return(KERN_SUCCESS);
668}
669
670/*
671 *	Return kernel version string (more than you ever
672 *	wanted to know about what version of the kernel this is).
673 */
674extern char	version[];
675
676kern_return_t
677host_kernel_version(
678	host_t			host,
679	kernel_version_t	out_version)
680{
681
682	if (host == HOST_NULL)
683		return(KERN_INVALID_ARGUMENT);
684
685	(void) strncpy(out_version, version, sizeof(kernel_version_t));
686
687	return(KERN_SUCCESS);
688}
689
690/*
691 *	host_processor_sets:
692 *
693 *	List all processor sets on the host.
694 */
695kern_return_t
696host_processor_sets(
697	host_priv_t			host_priv,
698	processor_set_name_array_t	*pset_list,
699	mach_msg_type_number_t		*count)
700{
701	void *addr;
702
703	if (host_priv == HOST_PRIV_NULL)
704		return (KERN_INVALID_ARGUMENT);
705
706	/*
707	 *	Allocate memory.  Can be pageable because it won't be
708	 *	touched while holding a lock.
709	 */
710
711	addr = kalloc((vm_size_t) sizeof(mach_port_t));
712	if (addr == 0)
713		return (KERN_RESOURCE_SHORTAGE);
714
715	/* do the conversion that Mig should handle */
716	*((ipc_port_t *) addr) = convert_pset_name_to_port(&pset0);
717
718	*pset_list = (processor_set_array_t)addr;
719	*count = 1;
720
721	return (KERN_SUCCESS);
722}
723
724/*
725 *	host_processor_set_priv:
726 *
727 *	Return control port for given processor set.
728 */
729kern_return_t
730host_processor_set_priv(
731	host_priv_t	host_priv,
732	processor_set_t	pset_name,
733	processor_set_t	*pset)
734{
735    if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
736		*pset = PROCESSOR_SET_NULL;
737
738		return (KERN_INVALID_ARGUMENT);
739    }
740
741    *pset = pset_name;
742
743    return (KERN_SUCCESS);
744}
745
746/*
747 *	host_processor_info
748 *
749 *	Return info about the processors on this host.  It will return
750 *	the number of processors, and the specific type of info requested
751 *	in an OOL array.
752 */
753kern_return_t
754host_processor_info(
755	host_t					host,
756	processor_flavor_t		flavor,
757	natural_t				*out_pcount,
758	processor_info_array_t	*out_array,
759	mach_msg_type_number_t	*out_array_count)
760{
761	kern_return_t			result;
762	processor_t				processor;
763	host_t					thost;
764	processor_info_t		info;
765	unsigned int			icount, tcount;
766	unsigned int			pcount, i;
767	vm_offset_t				addr;
768	vm_size_t				size, needed;
769	vm_map_copy_t			copy;
770
771	if (host == HOST_NULL)
772		return (KERN_INVALID_ARGUMENT);
773
774	result = processor_info_count(flavor, &icount);
775	if (result != KERN_SUCCESS)
776		return (result);
777
778	pcount = processor_count;
779	assert(pcount != 0);
780
781	needed = pcount * icount * sizeof(natural_t);
782	size = round_page(needed);
783	result = kmem_alloc(ipc_kernel_map, &addr, size);
784	if (result != KERN_SUCCESS)
785		return (KERN_RESOURCE_SHORTAGE);
786
787	info = (processor_info_t) addr;
788	processor = processor_list;
789	tcount = icount;
790
791	result = processor_info(processor, flavor, &thost, info, &tcount);
792	if (result != KERN_SUCCESS) {
793		kmem_free(ipc_kernel_map, addr, size);
794		return (result);
795	}
796
797	if (pcount > 1) {
798		for (i = 1; i < pcount; i++) {
799			simple_lock(&processor_list_lock);
800			processor = processor->processor_list;
801			simple_unlock(&processor_list_lock);
802
803			info += icount;
804			tcount = icount;
805			result = processor_info(processor, flavor, &thost, info, &tcount);
806			if (result != KERN_SUCCESS) {
807				kmem_free(ipc_kernel_map, addr, size);
808				return (result);
809			}
810		}
811	}
812
813	if (size != needed)
814		bzero((char *) addr + needed, size - needed);
815
816	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
817			       vm_map_round_page(addr + size), FALSE);
818	assert(result == KERN_SUCCESS);
819	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
820			       (vm_map_size_t)size, TRUE, &copy);
821	assert(result == KERN_SUCCESS);
822
823	*out_pcount = pcount;
824	*out_array = (processor_info_array_t) copy;
825	*out_array_count = pcount * icount;
826
827	return (KERN_SUCCESS);
828}
829
830/*
831 *      Kernel interface for setting a special port.
832 */
833kern_return_t
834kernel_set_special_port(
835	host_priv_t	host_priv,
836	int		id,
837	ipc_port_t	port)
838{
839	ipc_port_t old_port;
840
841	host_lock(host_priv);
842	old_port = host_priv->special[id];
843	host_priv->special[id] = port;
844	host_unlock(host_priv);
845	if (IP_VALID(old_port))
846		ipc_port_release_send(old_port);
847	return KERN_SUCCESS;
848}
849
850/*
851 *      User interface for setting a special port.
852 *
853 *      Only permits the user to set a user-owned special port
854 *      ID, rejecting a kernel-owned special port ID.
855 *
856 *      A special kernel port cannot be set up using this
857 *      routine; use kernel_set_special_port() instead.
858 */
859kern_return_t
860host_set_special_port(
861        host_priv_t     host_priv,
862        int             id,
863        ipc_port_t      port)
864{
865	if (host_priv == HOST_PRIV_NULL ||
866	    id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT ) {
867		if (IP_VALID(port))
868			ipc_port_release_send(port);
869		return KERN_INVALID_ARGUMENT;
870	}
871
872	return kernel_set_special_port(host_priv, id, port);
873}
874
875
876/*
877 *      User interface for retrieving a special port.
878 *
879 *      Note that there is nothing to prevent a user special
880 *      port from disappearing after it has been discovered by
881 *      the caller; thus, using a special port can always result
882 *      in a "port not valid" error.
883 */
884
885kern_return_t
886host_get_special_port(
887        host_priv_t     host_priv,
888        __unused int    node,
889        int             id,
890        ipc_port_t      *portp)
891{
892	ipc_port_t	port;
893
894	if (host_priv == HOST_PRIV_NULL ||
895	    id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
896		return KERN_INVALID_ARGUMENT;
897
898	host_lock(host_priv);
899	port = realhost.special[id];
900	*portp = ipc_port_copy_send(port);
901	host_unlock(host_priv);
902
903	return KERN_SUCCESS;
904}
905
906
907/*
908 * 	host_get_io_master
909 *
910 *	Return the IO master access port for this host.
911 */
912kern_return_t
913host_get_io_master(
914        host_t host,
915        io_master_t *io_masterp)
916{
917	if (host == HOST_NULL)
918		return KERN_INVALID_ARGUMENT;
919
920	return (host_get_io_master_port(host_priv_self(), io_masterp));
921}
922
923host_t
924host_self(void)
925{
926  return &realhost;
927}
928
929host_priv_t
930host_priv_self(void)
931{
932  return &realhost;
933}
934
935host_security_t
936host_security_self(void)
937{
938  return &realhost;
939}
940
941