/*
 *  commpage_tests.c
 *  xnu_quick_test
 *
 *  Copyright 2009 Apple Inc. All rights reserved.
 *
 */

#include "tests.h"
#include <unistd.h>
#include <stdint.h>
#include <err.h>
#include <sys/param.h>
#include <sys/sysctl.h>		/* sysctlbyname() */
#include <System/machine/cpu_capabilities.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/bootstrap.h>


#ifdef _COMM_PAGE_ACTIVE_CPUS
int active_cpu_test(void);
#endif

int get_sys_uint64(const char *sel, uint64_t *val);
int get_sys_int32(const char *sel, int32_t *val);

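/*
 * The _COMM_PAGE_* symbols from cpu_capabilities.h are integer addresses,
 * not pointers, so getcommptr() casts them through uintptr_t to obtain a
 * typed, dereferenceable pointer into the commpage.
 */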
#define getcommptr(var, commpageaddr) do { \
		var = (typeof(var))(uintptr_t)(commpageaddr); \
	} while(0)

/*
 * Check some of the data in the commpage
 * against the corresponding sysctl values.
 */
int commpage_data_tests( void * the_argp )
{
	int ret;
	uint64_t sys_u64;
	int32_t sys_i32;

	volatile uint64_t *comm_u64;
	volatile uint32_t *comm_u32;
	volatile uint16_t *comm_u16;
	volatile uint8_t *comm_u8;


	/* _COMM_PAGE_CPU_CAPABILITIES */
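	/*
	 * The capabilities word packs the kHas* feature bits together with the
	 * logical CPU count, which occupies the bits selected by the kNumCPUs
	 * mask and is recovered by shifting right by kNumCPUsShift (see
	 * cpu_capabilities.h).
	 */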
	getcommptr(comm_u32, _COMM_PAGE_CPU_CAPABILITIES);

	ret = get_sys_int32("hw.ncpu", &sys_i32);
	if (ret) goto fail;

	if (sys_i32 != ((*comm_u32 & kNumCPUs) >> kNumCPUsShift)) {
		warnx("kNumCPUs does not match hw.ncpu");
		ret = -1;
		goto fail;
	}

	getcommptr(comm_u8, _COMM_PAGE_NCPUS);
	if (sys_i32 != (*comm_u8)) {
		warnx("_COMM_PAGE_NCPUS does not match hw.ncpu");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.logicalcpu", &sys_i32);
	if (ret) goto fail;

	if (sys_i32 != ((*comm_u32 & kNumCPUs) >> kNumCPUsShift)) {
		warnx("kNumCPUs does not match hw.logicalcpu");
		ret = -1;
		goto fail;
	}

	/* Intel-only capabilities */
#if defined(__i386__) || defined(__x86_64__)
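	/*
	 * Each feature check below uses the !(a) ^ !(b) idiom: both operands
	 * are normalized to 0 or 1 first, so only the presence or absence of
	 * the feature is compared, never the raw sysctl value against the raw
	 * mask bits.
	 */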
	ret = get_sys_int32("hw.optional.mmx", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasMMX)) {
		warnx("kHasMMX does not match hw.optional.mmx");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.sse", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasSSE)) {
		warnx("kHasSSE does not match hw.optional.sse");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.sse2", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasSSE2)) {
		warnx("kHasSSE2 does not match hw.optional.sse2");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.sse3", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasSSE3)) {
		warnx("kHasSSE3 does not match hw.optional.sse3");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.supplementalsse3", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasSupplementalSSE3)) {
		warnx("kHasSupplementalSSE3 does not match hw.optional.supplementalsse3");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.sse4_1", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasSSE4_1)) {
		warnx("kHasSSE4_1 does not match hw.optional.sse4_1");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.sse4_2", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasSSE4_2)) {
		warnx("kHasSSE4_2 does not match hw.optional.sse4_2");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.aes", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & kHasAES)) {
		warnx("kHasAES does not match hw.optional.aes");
		ret = -1;
		goto fail;
	}

	ret = get_sys_int32("hw.optional.x86_64", &sys_i32);
	if (ret) goto fail;

	if (!(sys_i32) ^ !(*comm_u32 & k64Bit)) {
		warnx("k64Bit does not match hw.optional.x86_64");
		ret = -1;
		goto fail;
	}
#endif /* __i386__ || __x86_64__ */

	/* These fields are not implemented for all architectures */
#if defined(_COMM_PAGE_SCHED_GEN) && !TARGET_OS_EMBEDDED
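	/*
	 * _COMM_PAGE_SCHED_GEN is a scheduler generation count that the kernel
	 * advances over time; the test treats it as a pre-emption counter,
	 * hence preempt_count1/2. We spin for roughly one quantum (the loop
	 * bound is derived from hw.cpufrequency_max on the rough assumption
	 * that each empty iteration costs only a few cycles) and expect the
	 * value to have advanced by the second read.
	 */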
	uint32_t preempt_count1, preempt_count2;
	uint64_t count;

	ret = get_sys_uint64("hw.cpufrequency_max", &sys_u64);
	if (ret) goto fail;

	getcommptr(comm_u32, _COMM_PAGE_SCHED_GEN);
	preempt_count1 = *comm_u32;
	/* execute for around 1 quantum (10ms) */
	for(count = MAX(10000000ULL, sys_u64/64); count > 0; count--) {
		asm volatile("");
	}
	preempt_count2 = *comm_u32;
	if (preempt_count1 >= preempt_count2) {
		warnx("_COMM_PAGE_SCHED_GEN not incrementing (%u => %u)",
			  preempt_count1, preempt_count2);
		ret = -1;
		goto fail;
	}
#endif /* _COMM_PAGE_SCHED_GEN */

#ifdef _COMM_PAGE_ACTIVE_CPUS
	ret = get_sys_int32("hw.activecpu", &sys_i32);
	if (ret) goto fail;

	getcommptr(comm_u8, _COMM_PAGE_ACTIVE_CPUS);
	if (sys_i32 != (*comm_u8)) {
		warnx("_COMM_PAGE_ACTIVE_CPUS does not match hw.activecpu");
		ret = -1;
		goto fail;
	}

	/* We shouldn't be supporting userspace processor_start/processor_exit on embedded */
#if !TARGET_OS_EMBEDDED
	ret = active_cpu_test();
	if (ret) goto fail;
#endif /* !TARGET_OS_EMBEDDED */
#endif /* _COMM_PAGE_ACTIVE_CPUS */

#ifdef _COMM_PAGE_PHYSICAL_CPUS
	ret = get_sys_int32("hw.physicalcpu_max", &sys_i32);
	if (ret) goto fail;

	getcommptr(comm_u8, _COMM_PAGE_PHYSICAL_CPUS);
	if (sys_i32 != (*comm_u8)) {
		warnx("_COMM_PAGE_PHYSICAL_CPUS does not match hw.physicalcpu_max");
		ret = -1;
		goto fail;
	}
#endif /* _COMM_PAGE_PHYSICAL_CPUS */

#ifdef _COMM_PAGE_LOGICAL_CPUS
	ret = get_sys_int32("hw.logicalcpu_max", &sys_i32);
	if (ret) goto fail;

	getcommptr(comm_u8, _COMM_PAGE_LOGICAL_CPUS);
	if (sys_i32 != (*comm_u8)) {
		warnx("_COMM_PAGE_LOGICAL_CPUS does not match hw.logicalcpu_max");
		ret = -1;
		goto fail;
	}
#endif /* _COMM_PAGE_LOGICAL_CPUS */

#if 0
#ifdef _COMM_PAGE_MEMORY_SIZE
	ret = get_sys_uint64("hw.memsize", &sys_u64);
	if (ret) goto fail;

	getcommptr(comm_u64, _COMM_PAGE_MEMORY_SIZE);
	if (sys_u64 != (*comm_u64)) {
		warnx("_COMM_PAGE_MEMORY_SIZE does not match hw.memsize");
		ret = -1;
		goto fail;
	}
#endif /* _COMM_PAGE_MEMORY_SIZE */
#endif

	ret = 0;

fail:

	return ret;
}


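/*
 * Thin wrappers around sysctlbyname(3): each fetches a single value of the
 * expected width into *val and warns (without exiting) if the sysctl fails.
 */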
int get_sys_uint64(const char *sel, uint64_t *val)
{
	size_t size = sizeof(*val);
	int ret;

	ret = sysctlbyname(sel, val, &size, NULL, 0);
	if (ret == -1) {
		warn("sysctlbyname(%s)", sel);
		return ret;
	}

//	warnx("sysctlbyname(%s) => %llx", sel, *val);

	return 0;
}

int get_sys_int32(const char *sel, int32_t *val)
{
	size_t size = sizeof(*val);
	int ret;

	ret = sysctlbyname(sel, val, &size, NULL, 0);
	if (ret == -1) {
		warn("sysctlbyname(%s)", sel);
		return ret;
	}

//	warnx("sysctlbyname(%s) => %x", sel, *val);

	return 0;
}

#ifdef _COMM_PAGE_ACTIVE_CPUS
/*
 * Try to find a secondary processor that we can disable, and make sure the
 * commpage reflects the change. This test passes trivially on uniprocessor
 * systems and on systems where all secondary processors have already been
 * disabled by hand.
 */
int active_cpu_test(void)
{
	volatile uint8_t *activeaddr;
	uint8_t original_activecpu;
	boolean_t test_failed = FALSE;

	/* Code adapted from hostinfo.c */
	kern_return_t           ret;
	processor_t             *processor_list;
	host_name_port_t        host;
	struct processor_basic_info     processor_basic_info;
	mach_msg_type_number_t  cpu_count;
	mach_msg_type_number_t  data_count;
	int                     i;


	getcommptr(activeaddr, _COMM_PAGE_ACTIVE_CPUS);
	original_activecpu = *activeaddr;

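	/*
	 * Procedure: remember the commpage's active CPU count, ask the kernel
	 * for the processor list, skip processor 0 (the master processor, which
	 * the test never tries to disable), and exit the first running secondary
	 * processor found. The count in the commpage should drop by exactly one;
	 * after restarting the processor it should return to the original value.
	 * The sleep(1) calls are there to give the kernel a moment to publish
	 * the updated count in the commpage.
	 */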
	host = mach_host_self();
	ret = host_processors(host,
						  (processor_array_t *) &processor_list, &cpu_count);
	if (ret != KERN_SUCCESS) {
		mach_error("host_processors()", ret);
		return ret;
	}

	/* skip master processor */
	for (i = 1; i < cpu_count; i++) {
		data_count = PROCESSOR_BASIC_INFO_COUNT;
		ret = processor_info(processor_list[i], PROCESSOR_BASIC_INFO,
							 &host,
							 (processor_info_t) &processor_basic_info,
							 &data_count);
		if (ret != KERN_SUCCESS) {
			if (ret == MACH_SEND_INVALID_DEST) {
				continue;
			}
			mach_error("processor_info", ret);
			return ret;
		}

		if (processor_basic_info.running) {
			/* found victim */
			ret = processor_exit(processor_list[i]);
			if (ret != KERN_SUCCESS) {
				mach_error("processor_exit()", ret);
				return ret;
			}

			sleep(1);

			if (*activeaddr != (original_activecpu - 1)) {
				test_failed = TRUE;
			}

			ret = processor_start(processor_list[i]);
			if (ret != KERN_SUCCESS) {
				mach_error("processor_start()", ret);
				return ret;
			}

			sleep(1);

			break;
		}
	}

	if (test_failed) {
		warnx("_COMM_PAGE_ACTIVE_CPUS not updated after disabling a CPU");
		return -1;
	}

	if (*activeaddr != original_activecpu) {
		warnx("_COMM_PAGE_ACTIVE_CPUS not restored to original value");
		return -1;
	}

	return 0;
}
#endif
