/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	host.c
 *
 *	Non-IPC host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

host_data_t	realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(
	host_priv_t				host_priv,
	processor_array_t		*out_array,
	mach_msg_type_number_t	*countp)
{
	register processor_t	processor, *tp;
	void					*addr;
	unsigned int			count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *) addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that MIG should handle */

	tp = (processor_t *) addr;
	for (i = 0; i < count; i++)
		((mach_port_t *) tp)[i] =
		      (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
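
/*
 *	Example (illustrative sketch, not compiled here): listing the
 *	processor ports from user space.  This assumes the caller has
 *	already obtained a host_priv send right, which normally requires
 *	running as root.  Each returned entry is a send right; release it
 *	with mach_port_deallocate(), and vm_deallocate() the OOL array.
 *
 *	#include <mach/mach.h>
 *
 *	processor_array_t	procs;
 *	mach_msg_type_number_t	nprocs;
 *
 *	if (host_processors(host_priv, &procs, &nprocs) == KERN_SUCCESS) {
 *		... use procs[0 .. nprocs-1] ...
 *		vm_deallocate(mach_task_self(), (vm_address_t)procs,
 *			      nprocs * sizeof(*procs));
 *	}
 */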

kern_return_t
host_info(
	host_t					host,
	host_flavor_t			flavor,
	host_info_t				info,
	mach_msg_type_number_t	*count)
{

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case HOST_BASIC_INFO:
	{
		register host_basic_info_t	basic_info;
		register int				master_id;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t) info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO:
	{
		register host_sched_info_t	sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 *	Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t) info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout =
			sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES:
	{
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO:
	{
		register host_priority_info_t	priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t) info;

		priority_info->kernel_priority	= MINPRI_KERNEL;
		priority_info->system_priority	= MINPRI_KERNEL;
		priority_info->server_priority	= MINPRI_RESERVED;
		priority_info->user_priority	= BASEPRI_DEFAULT;
		priority_info->depress_priority	= DEPRESSPRI;
		priority_info->idle_priority	= IDLEPRI;
		priority_info->minimum_priority	= MINPRI_USER;
		priority_info->maximum_priority	= MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS:
	{
		*count = 0;
		return (KERN_SUCCESS);
	}

	case HOST_VM_PURGABLE:
	{
		if (*count < HOST_VM_PURGABLE_COUNT)
			return (KERN_FAILURE);

		vm_purgeable_stats((vm_purgeable_info_t) info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
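
/*
 *	Example (illustrative sketch, not compiled here): querying
 *	HOST_BASIC_INFO from user space through the host name port
 *	returned by mach_host_self().
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	host_basic_info_data_t	basic;
 *	mach_msg_type_number_t	count = HOST_BASIC_INFO_COUNT;
 *
 *	if (host_info(mach_host_self(), HOST_BASIC_INFO,
 *		      (host_info_t)&basic, &count) == KERN_SUCCESS)
 *		printf("cpus: %d/%d  max_mem: %llu\n",
 *		       basic.avail_cpus, basic.max_cpus,
 *		       (unsigned long long)basic.max_mem);
 */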

kern_return_t
host_statistics(
	host_t					host,
	host_flavor_t			flavor,
	host_info_t				info,
	mach_msg_type_number_t	*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {

	case HOST_LOAD_INFO:
	{
		host_load_info_t	load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t) info;

		bcopy((char *) avenrun,
			  (char *) load_info->avenrun, sizeof avenrun);
		bcopy((char *) mach_factor,
			  (char *) load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO:
	{
		register processor_t		processor;
		register vm_statistics64_t	stat;
		vm_statistics64_data_t		host_vm_stat;
		vm_statistics_t			stat32;
		mach_msg_type_number_t		original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t) info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl	*lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO:
	{
		register processor_t	processor;
		host_cpu_load_info_t	cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(state, ticks)					\
MACRO_BEGIN								\
	cpu_load_info->cpu_ticks[(state)] +=				\
		(uint32_t)(ticks / hz_tick_interval);			\
MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer)		\
MACRO_BEGIN								\
	GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t			idle_state;
			uint64_t		idle_time_snapshot1, idle_time_snapshot2;
			uint64_t		idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (PROCESSOR_DATA(processor, current_state) != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
					   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale.  Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO:
	{
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo = (task_power_info_t)info;

		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
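
/*
 *	Example (illustrative sketch, not compiled here): sampling the
 *	32-bit VM statistics from user space.  Callers that want the
 *	compressor and swap fields should prefer host_statistics64()
 *	below.
 *
 *	#include <mach/mach.h>
 *
 *	vm_statistics_data_t	vmstat;
 *	mach_msg_type_number_t	count = HOST_VM_INFO_COUNT;
 *
 *	if (host_statistics(mach_host_self(), HOST_VM_INFO,
 *			    (host_info_t)&vmstat, &count) == KERN_SUCCESS)
 *		... vmstat.free_count, vmstat.pageins, etc. are valid ...
 */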

extern uint32_t	c_segment_pages_compressed;

kern_return_t
host_statistics64(
	host_t				host,
	host_flavor_t			flavor,
	host_info64_t			info,
	mach_msg_type_number_t		*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {

		case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		{
			register processor_t		processor;
			register vm_statistics64_t	stat;
			vm_statistics64_data_t		host_vm_stat;
			mach_msg_type_number_t		original_count;
			unsigned int			local_q_internal_count;
			unsigned int			local_q_external_count;

			if (*count < HOST_VM_INFO64_REV0_COUNT)
				return (KERN_FAILURE);

			processor = processor_list;
			stat = &PROCESSOR_DATA(processor, vm_stat);
			host_vm_stat = *stat;

			if (processor_count > 1) {
				simple_lock(&processor_list_lock);

				while ((processor = processor->processor_list) != NULL) {
					stat = &PROCESSOR_DATA(processor, vm_stat);

					host_vm_stat.zero_fill_count += stat->zero_fill_count;
					host_vm_stat.reactivations += stat->reactivations;
					host_vm_stat.pageins += stat->pageins;
					host_vm_stat.pageouts += stat->pageouts;
					host_vm_stat.faults += stat->faults;
					host_vm_stat.cow_faults += stat->cow_faults;
					host_vm_stat.lookups += stat->lookups;
					host_vm_stat.hits += stat->hits;
					host_vm_stat.compressions += stat->compressions;
					host_vm_stat.decompressions += stat->decompressions;
					host_vm_stat.swapins += stat->swapins;
					host_vm_stat.swapouts += stat->swapouts;
				}

				simple_unlock(&processor_list_lock);
			}

			stat = (vm_statistics64_t) info;

			stat->free_count = vm_page_free_count + vm_page_speculative_count;
			stat->active_count = vm_page_active_count;

			local_q_internal_count = 0;
			local_q_external_count = 0;
			if (vm_page_local_q) {
				for (i = 0; i < vm_page_local_q_count; i++) {
					struct vpl	*lq;

					lq = &vm_page_local_q[i].vpl_un.vpl;

					stat->active_count += lq->vpl_count;
					local_q_internal_count +=
						lq->vpl_internal_count;
					local_q_external_count +=
						lq->vpl_external_count;
				}
			}
			stat->inactive_count = vm_page_inactive_count;
			stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
			stat->zero_fill_count = host_vm_stat.zero_fill_count;
			stat->reactivations = host_vm_stat.reactivations;
			stat->pageins = host_vm_stat.pageins;
			stat->pageouts = host_vm_stat.pageouts;
			stat->faults = host_vm_stat.faults;
			stat->cow_faults = host_vm_stat.cow_faults;
			stat->lookups = host_vm_stat.lookups;
			stat->hits = host_vm_stat.hits;

			stat->purgeable_count = vm_page_purgeable_count;
			stat->purges = vm_page_purged_count;

			stat->speculative_count = vm_page_speculative_count;

			/*
			 * Fill in extra info added in later revisions of the
			 * vm_statistics data structure.  Fill in only what can fit
			 * in the data structure the caller gave us!
			 */
			original_count = *count;
			*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
			if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
				/* rev1 added "throttled count" */
				stat->throttled_count = vm_page_throttled_count;
				/* rev1 added "compression" info */
				stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
				stat->compressions = host_vm_stat.compressions;
				stat->decompressions = host_vm_stat.decompressions;
				stat->swapins = host_vm_stat.swapins;
				stat->swapouts = host_vm_stat.swapouts;
				/* rev1 added:
				 * "external page count"
				 * "anonymous page count"
				 * "total # of pages (uncompressed) held in the compressor"
				 */
				stat->external_page_count =
					(vm_page_pageable_external_count +
					 local_q_external_count);
				stat->internal_page_count =
					(vm_page_pageable_internal_count +
					 local_q_internal_count);
				stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
				*count = HOST_VM_INFO64_REV1_COUNT;
			}

			return (KERN_SUCCESS);
		}

		case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
		{
			vm_extmod_statistics_t		out_extmod_statistics;

			if (*count < HOST_EXTMOD_INFO64_COUNT)
				return (KERN_FAILURE);

			out_extmod_statistics = (vm_extmod_statistics_t) info;
			*out_extmod_statistics = host_extmod_statistics;

			*count = HOST_EXTMOD_INFO64_COUNT;

			return (KERN_SUCCESS);
		}

		default: /* If we didn't recognize the flavor, send to host_statistics */
			return (host_statistics(host, flavor, (host_info_t) info, count));
	}
}
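
/*
 *	Example (illustrative sketch, not compiled here): the 64-bit
 *	variant, which also reports compressor and swap activity when
 *	the caller passes a rev1-sized buffer.  Unrecognized flavors
 *	fall through to host_statistics() above.
 *
 *	#include <mach/mach.h>
 *
 *	vm_statistics64_data_t	vm64;
 *	mach_msg_type_number_t	count = HOST_VM_INFO64_COUNT;
 *
 *	if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *			      (host_info64_t)&vm64, &count) == KERN_SUCCESS)
 *		... vm64.compressions, vm64.swapouts, etc. are valid ...
 */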


/*
 * Get host statistics that require privilege.
 * None for now; just call the unprivileged version.
 */
kern_return_t
host_priv_statistics(
	host_priv_t		host_priv,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
	return (host_statistics((host_t)host_priv, flavor, info, count));
}

kern_return_t
set_sched_stats_active(
		boolean_t active)
{
	sched_stats_active = active;
	return (KERN_SUCCESS);
}


kern_return_t
get_sched_statistics(
		struct _processor_statistics_np *out,
		uint32_t *count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return (KERN_FAILURE);
	}

	simple_lock(&processor_list_lock);

	if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
		simple_unlock(&processor_list_lock);
		return (KERN_FAILURE);
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;

		out->ps_cpuid			= processor->cpu_id;
		out->ps_csw_count		= stats->csw_count;
		out->ps_preempt_count		= stats->preempt_count;
		out->ps_preempted_rt_count	= stats->preempted_rt_count;
		out->ps_preempted_by_rt_count	= stats->preempted_by_rt_count;
		out->ps_rt_sched_count		= stats->rt_sched_count;
		out->ps_interrupt_count		= stats->interrupt_count;
		out->ps_ipi_count		= stats->ipi_count;
		out->ps_timer_pop_count		= stats->timer_pop_count;
		out->ps_runq_count_sum		= SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions	= stats->idle_transitions;
		out->ps_quantum_timer_expirations	= stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	/* And include Fair Share Queue information at the end */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-2);
	out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return (KERN_SUCCESS);
}

kern_return_t
host_page_size(
	host_t		host,
	vm_size_t	*out_page_size)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_map_t map = get_task_map(current_task());
	*out_page_size = vm_map_page_size(map);

	return (KERN_SUCCESS);
}
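
/*
 *	Example (illustrative sketch, not compiled here):
 *
 *	#include <mach/mach.h>
 *
 *	vm_size_t page_size;
 *
 *	if (host_page_size(mach_host_self(), &page_size) == KERN_SUCCESS)
 *		... page_size is the VM map page size of the caller ...
 */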

/*
 *	Return kernel version string (more than you ever
 *	wanted to know about what version of the kernel this is).
 */
extern char	version[];

kern_return_t
host_kernel_version(
	host_t			host,
	kernel_version_t	out_version)
{

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void) strncpy(out_version, version, sizeof(kernel_version_t));

	return (KERN_SUCCESS);
}
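
/*
 *	Example (illustrative sketch, not compiled here):
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	kernel_version_t kversion;
 *
 *	if (host_kernel_version(mach_host_self(), kversion) == KERN_SUCCESS)
 *		printf("%s\n", kversion);
 */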

/*
 *	host_processor_sets:
 *
 *	List all processor sets on the host.
 */
kern_return_t
host_processor_sets(
	host_priv_t			host_priv,
	processor_set_name_array_t	*pset_list,
	mach_msg_type_number_t		*count)
{
	void *addr;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 *	Allocate memory.  Can be pageable because it won't be
	 *	touched while holding a lock.
	 */

	addr = kalloc((vm_size_t) sizeof(mach_port_t));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* do the conversion that MIG should handle */
	*((ipc_port_t *) addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return (KERN_SUCCESS);
}

/*
 *	host_processor_set_priv:
 *
 *	Return the control port for a given processor set.
 */
kern_return_t
host_processor_set_priv(
	host_priv_t	host_priv,
	processor_set_t	pset_name,
	processor_set_t	*pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return (KERN_INVALID_ARGUMENT);
	}

	*pset = pset_name;

	return (KERN_SUCCESS);
}

/*
 *	host_processor_info
 *
 *	Return info about the processors on this host.  It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(
	host_t					host,
	processor_flavor_t		flavor,
	natural_t				*out_pcount,
	processor_info_array_t	*out_array,
	mach_msg_type_number_t	*out_array_count)
{
	kern_return_t			result;
	processor_t				processor;
	host_t					thost;
	processor_info_t		info;
	unsigned int			icount, tcount;
	unsigned int			pcount, i;
	vm_offset_t				addr;
	vm_size_t				size, needed;
	vm_map_copy_t			copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed,
				 VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t) addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed)
		bzero((char *) addr + needed, size - needed);

	result = vm_map_unwire(
		ipc_kernel_map,
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(ipc_kernel_map)),
		vm_map_round_page(addr + size,
				  VM_MAP_PAGE_MASK(ipc_kernel_map)),
		FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
			       (vm_map_size_t)size, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t) copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
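
/*
 *	Example (illustrative sketch, not compiled here): per-CPU load
 *	ticks from user space.  The array comes back out-of-line, so the
 *	caller must vm_deallocate() it when done.
 *
 *	#include <mach/mach.h>
 *
 *	natural_t			pcount;
 *	processor_info_array_t		info;
 *	mach_msg_type_number_t		icount;
 *
 *	if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *				&pcount, &info, &icount) == KERN_SUCCESS) {
 *		processor_cpu_load_info_t load = (processor_cpu_load_info_t)info;
 *		... load[i].cpu_ticks[CPU_STATE_USER] for i < pcount ...
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *			      icount * sizeof(natural_t));
 *	}
 */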

/*
 *	Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(
	host_priv_t	host_priv,
	int		id,
	ipc_port_t	port)
{
	ipc_port_t old_port;

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);
	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return (KERN_SUCCESS);
}

/*
 *	User interface for setting a special port.
 *
 *	Only permits the user to set a user-owned special port
 *	ID, rejecting a kernel-owned special port ID.
 *
 *	A special kernel port cannot be set up using this
 *	routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(
	host_priv_t	host_priv,
	int		id,
	ipc_port_t	port)
{
	if (host_priv == HOST_PRIV_NULL ||
	    id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		if (IP_VALID(port))
			ipc_port_release_send(port);
		return (KERN_INVALID_ARGUMENT);
	}

	return (kernel_set_special_port(host_priv, id, port));
}


/*
 *	User interface for retrieving a special port.
 *
 *	Note that there is nothing to prevent a user special
 *	port from disappearing after it has been discovered by
 *	the caller; thus, using a special port can always result
 *	in a "port not valid" error.
 */

kern_return_t
host_get_special_port(
	host_priv_t	host_priv,
	__unused int	node,
	int		id,
	ipc_port_t	*portp)
{
	ipc_port_t	port;

	if (host_priv == HOST_PRIV_NULL ||
	    id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
		return (KERN_INVALID_ARGUMENT);

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return (KERN_SUCCESS);
}
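
/*
 *	Example (illustrative sketch, not compiled here): fetching a
 *	user-settable special port.  This assumes the caller already
 *	holds a host_priv send right (root only); the convenience
 *	wrappers in <mach/host_special_ports.h> expand to this call.
 *
 *	mach_port_t port;
 *
 *	if (host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *				  HOST_AUTOMOUNTD_PORT, &port) == KERN_SUCCESS)
 *		... send a message to the automountd port ...
 */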


/*
 *	host_get_io_master
 *
 *	Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(
	host_t		host,
	io_master_t	*io_masterp)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}

host_t
host_self(void)
{
	return (&realhost);
}

host_priv_t
host_priv_self(void)
{
	return (&realhost);
}

host_security_t
host_security_self(void)
{
	return (&realhost);
}
