/*
 *  machvm_tests.c
 *  xnu_quick_test
 *
 *  Copyright 2008 Apple Inc. All rights reserved.
 *
 */

#include "tests.h"
#include <mach/mach.h>
#include <unistd.h>
#include <err.h>
#include <sys/param.h>
#include <mach-o/ldsyms.h>
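
/*
 * Exercise the basic Mach VM interfaces: vm_allocate, vm_deallocate,
 * vm_protect, vm_read, vm_read_list, vm_read_overwrite, vm_copy, vm_write,
 * the vm_region family, and mach_make_memory_entry/vm_map.
 * Returns KERN_SUCCESS (0) on success, a non-zero value on failure.
 */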
int machvm_tests( void * the_argp )
{
	int pagesize = getpagesize();
	int regionsizes[] = { 1, 3, 7, 13, 77, 1223 }; /* sizes must be in increasing order */
	char *regionbuffers[] = { NULL, NULL, NULL, NULL, NULL, NULL };
	int i;
	kern_return_t kret;

	/* Use vm_allocate to grab some memory */
	for (i=0; i < sizeof(regionsizes)/sizeof(regionsizes[0]); i++) {
		vm_address_t addr = 0;

		kret = vm_allocate(mach_task_self(), &addr, regionsizes[i]*pagesize, VM_FLAGS_ANYWHERE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_allocate of %d pages failed: %d", regionsizes[i], kret);
			goto fail;
		}
		regionbuffers[i] = (char *)addr;
	}

	/* deallocate one range without having touched it, scribble on another, then deallocate that one */
	kret = vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[4], regionsizes[4]*pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_deallocate of %d pages failed: %d", regionsizes[4], kret);
		goto fail;
	}
	regionbuffers[4] = NULL;

	memset(regionbuffers[3], 0x4f, pagesize*MIN(3, regionsizes[3]));

	kret = vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[3], regionsizes[3]*pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_deallocate of %d pages failed: %d", regionsizes[3], kret);
		goto fail;
	}
	regionbuffers[3] = NULL;

	// populate the largest buffer with a byte pattern that matches the page offset, then set it to read-only
	for (i=0; i < regionsizes[5]; i++) {
		memset(regionbuffers[5] + i*pagesize, (unsigned char)i, pagesize);
	}
	kret = vm_protect(mach_task_self(), (vm_offset_t)regionbuffers[5], regionsizes[5]*pagesize, FALSE, VM_PROT_READ);
	if (kret != KERN_SUCCESS) {
		warnx("vm_protect of %d pages failed: %d", regionsizes[5], kret);
		goto fail;
	}

	// read the last few pages of the largest buffer and verify their contents
	{
		vm_offset_t	newdata;
		mach_msg_type_number_t newcount;

		kret = vm_read(mach_task_self(), (vm_address_t)regionbuffers[5] + (regionsizes[5]-5)*pagesize, 5*pagesize,
					   &newdata, &newcount);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read of %d pages failed: %d", 5, kret);
			goto fail;
		}

		if (0 != memcmp((char *)newdata, regionbuffers[5] + (regionsizes[5]-5)*pagesize,
						5*pagesize)) {
			warnx("vm_read comparison of %d pages failed", 5);
			kret = -1;
			vm_deallocate(mach_task_self(), newdata, 5*pagesize);
			goto fail;
		}

		kret = vm_deallocate(mach_task_self(), newdata, 5*pagesize);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate of %d pages failed: %d", 5, kret);
			goto fail;
		}
	}

	// do a list read to repopulate slots 3 and 4
	{
		vm_read_entry_t	readlist;

		readlist[0].address = (vm_offset_t)regionbuffers[5] + 10*pagesize;
		readlist[0].size = regionsizes[3]*pagesize;
		readlist[1].address = (vm_offset_t)regionbuffers[5] + 10*pagesize + regionsizes[3]*pagesize;
		readlist[1].size = regionsizes[4]*pagesize;

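		/* vm_read_list copies each requested range and rewrites the entry's
		   address field to point at the new copy in our own address space */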
		kret = vm_read_list(mach_task_self(), readlist, 2);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read_list failed: %d", kret);
			goto fail;
		}

		if (0 != memcmp((char *)readlist[0].address, regionbuffers[5] + 10*pagesize,
						regionsizes[3]*pagesize)) {
			warnx("vm_read_list comparison of allocation 0 failed");
			kret = -1;
			vm_deallocate(mach_task_self(), readlist[0].address, readlist[0].size);
			vm_deallocate(mach_task_self(), readlist[1].address, readlist[1].size);
			goto fail;
		}

		if (0 != memcmp((char *)readlist[1].address, regionbuffers[5] + 10*pagesize + regionsizes[3]*pagesize,
						regionsizes[4]*pagesize)) {
			warnx("vm_read_list comparison of allocation 1 failed");
			kret = -1;
			vm_deallocate(mach_task_self(), readlist[0].address, readlist[0].size);
			vm_deallocate(mach_task_self(), readlist[1].address, readlist[1].size);
			goto fail;
		}

		regionbuffers[3] = (char *)readlist[0].address;
		regionbuffers[4] = (char *)readlist[1].address;
	}

	// do a read_overwrite and copy, which should be about the same
	{
		vm_size_t count;

		kret = vm_read_overwrite(mach_task_self(), (vm_offset_t)regionbuffers[3],
								 regionsizes[0]*pagesize,
								 (vm_offset_t)regionbuffers[0],
								 &count);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read_overwrite of %d pages failed: %d", regionsizes[0], kret);
			goto fail;
		}

		kret = vm_copy(mach_task_self(), (vm_offset_t)regionbuffers[0],
								 regionsizes[0]*pagesize,
								 (vm_offset_t)regionbuffers[1]);
		if (kret != KERN_SUCCESS) {
			warnx("vm_copy of %d pages failed: %d", regionsizes[0], kret);
			goto fail;
		}

		if (0 != memcmp(regionbuffers[1], regionbuffers[3],
						regionsizes[0]*pagesize)) {
			warnx("vm_read_overwrite/vm_copy comparison failed");
			kret = -1;
			goto fail;
		}
	}

	// vm_write our mach-o header into one of the buffers and compare

	kret = vm_write(mach_task_self(), (vm_address_t)regionbuffers[2],
						(vm_offset_t)&_mh_execute_header, pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_write of %d pages failed: %d", 1, kret);
		goto fail;
	}

	if (_mh_execute_header.magic != *(uint32_t *)regionbuffers[2]) {
		warnx("vm_write comparison failed");
		kret = -1;
		goto fail;
	}

	// check that the vm_protect above worked
	{
		vm_address_t addr = (vm_address_t)regionbuffers[5]+7*pagesize;
		vm_size_t size = pagesize;
		int _basic[VM_REGION_BASIC_INFO_COUNT];
		vm_region_basic_info_t basic = (vm_region_basic_info_t)_basic;
		int _basic64[VM_REGION_BASIC_INFO_COUNT_64];
		vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
		int _submap[VM_REGION_SUBMAP_INFO_COUNT];
		vm_region_submap_info_t submap = (vm_region_submap_info_t)_submap;
		mach_msg_type_number_t	infocnt;
		mach_port_t	objname;
		natural_t nesting_depth = 0;

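		/* on 32-bit we query the region with vm_region, vm_region_64 and
		   vm_region_recurse; on LP64 only the 64-bit variant is exercised */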
#if !__LP64__
		infocnt = VM_REGION_BASIC_INFO_COUNT;
		kret = vm_region(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO,
						 (vm_region_info_t)basic, &infocnt, &objname);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region(VM_REGION_BASIC_INFO) failed: %d", kret);
			goto fail;
		}
		if (VM_REGION_BASIC_INFO_COUNT != infocnt) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad info count");
			kret = -1;
			goto fail;
		}

		// when we did the vm_read_list above, it should have split this region into
		// a 10 page sub-region
		if (addr != (vm_address_t)regionbuffers[5] || size != 10*pagesize) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad region range");
			kret = -1;
			goto fail;
		}

		if (basic->protection != VM_PROT_READ) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad protection");
			kret = -1;
			goto fail;
		}
#endif

		infocnt = VM_REGION_BASIC_INFO_COUNT_64;
		// intentionally use VM_REGION_BASIC_INFO and get up-converted
		kret = vm_region_64(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO,
						 (vm_region_info_t)basic64, &infocnt, &objname);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) failed: %d", kret);
			goto fail;
		}
		if (VM_REGION_BASIC_INFO_COUNT_64 != infocnt) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad info count");
			kret = -1;
			goto fail;
		}

		// when we did the vm_read_list above, it should have split this region into
		// a 10 page sub-region
		if (addr != (vm_address_t)regionbuffers[5] || size != 10*pagesize) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad region range");
			kret = -1;
			goto fail;
		}

		if (basic64->protection != VM_PROT_READ) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad protection");
			kret = -1;
			goto fail;
		}

#if !__LP64__
		// compare fields between the two info structures. Particularly important
		// for the fields after offset, which shift in the 64-bit layout
		if (basic->offset != basic64->offset ||
			basic->behavior != basic64->behavior ||
			basic->user_wired_count != basic64->user_wired_count) {
			warnx("vm_region and vm_region_64 did not agree");
			kret = -1;
			goto fail;
		}
#endif

#if !__LP64__
		infocnt = VM_REGION_SUBMAP_INFO_COUNT;
		kret = vm_region_recurse(mach_task_self(), &addr, &size,
								 &nesting_depth, (vm_region_info_t)submap,
								 &infocnt);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region_recurse() failed: %d", kret);
			goto fail;
		}

		if (VM_REGION_SUBMAP_INFO_COUNT != infocnt) {
			warnx("vm_region_recurse() returned a bad info count");
			kret = -1;
			goto fail;
		}

		if (submap->pages_dirtied != 10) {
			warnx("vm_region_recurse() returned bad pages_dirtied");
			kret = -1;
			goto fail;
		}

#endif /* !__LP64__ */

	}

	// exercise mach_make_memory_entry/vm_map
	{
		vm_address_t addr1, addr2;
		vm_size_t size;
		mach_port_t mem_handle = MACH_PORT_NULL;

		addr1 = 0;
		size = 11*pagesize;
		kret = vm_allocate(mach_task_self(), &addr1, size, VM_FLAGS_ANYWHERE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_allocate failed: %d", kret);
			kret = -1;
			goto fail;
		}

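		/* drop a marker value into the first word so we can check that the
		   memory entry mapped below refers to the same data */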
		*(uint32_t *)(uintptr_t)addr1 = 'test';

		kret = mach_make_memory_entry(mach_task_self(),
									  &size, addr1, VM_PROT_DEFAULT,
									  &mem_handle, MACH_PORT_NULL);
		if (kret != KERN_SUCCESS) {
			warnx("mach_make_memory_entry failed: %d", kret);
			kret = -1;
			goto fail;
		}

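		/* deallocate the original allocation; the named entry we just made
		   should keep the underlying memory (and our marker) alive */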
		kret = vm_deallocate(mach_task_self(), addr1, size);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate failed: %d", kret);
			kret = -1;
			goto fail;
		}

		addr2 = 0;
		kret = vm_map(mach_task_self(), &addr2, size, 0, VM_FLAGS_ANYWHERE,
					  mem_handle, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
					  VM_INHERIT_NONE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_map failed: %d", kret);
			kret = -1;
			goto fail;
		}

		if (*(uint32_t *)(uintptr_t)addr2 != 'test') {
			warnx("mapped data mismatch");
			kret = -1;
			goto fail;
		}

		kret = vm_deallocate(mach_task_self(), addr2, size);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate failed: %d", kret);
			kret = -1;
			goto fail;
		}

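		/* release our only send right to the memory entry */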
		kret = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
		if (kret != KERN_SUCCESS) {
			warnx("mach_port_mod_refs(-1) failed: %d", kret);
			kret = -1;
			goto fail;
		}

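		/* with the send right released, the handle is no longer valid, so
		   mapping it again should fail */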
		addr2 = 0;
		kret = vm_map(mach_task_self(), &addr2, size, 0, VM_FLAGS_ANYWHERE,
					  mem_handle, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
					  VM_INHERIT_NONE);
		if (kret == KERN_SUCCESS) {
			warnx("vm_map succeeded when it should not have");
			kret = -1;
			goto fail;
		}

		kret = KERN_SUCCESS;
	}

fail:
	for (i=0; i < sizeof(regionsizes)/sizeof(regionsizes[0]); i++) {
		if (regionbuffers[i]) {
			vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[i], regionsizes[i]*pagesize);
		}
	}

	return kret;
}