/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm32_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

#include <debug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm32_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#if VM32_SUPPORT

/*
 * See vm_user.c for the real implementation of all of these functions.
 * We call through to the mach_ "wide" versions of the routines, and trust
 * that the VM system verifies the arguments and only returns addresses
 * that are appropriate for the task's address space size.
 *
 * New VM call implementations should not be added here, because they would
 * be available only to 32-bit userspace clients. Add them to vm_user.c,
 * and add the corresponding prototype to mach_vm.defs (subsystem 4800).
 */

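/*
 * Most of the wrappers below follow the same pattern: widen the caller's
 * 32-bit address or size into the mach_vm_ types, call the wide routine,
 * and truncate any returned values back down with CAST_DOWN_EXPLICIT.
 */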
kern_return_t
vm32_allocate(
	vm_map_t	map,
	vm32_offset_t	*addr,
	vm32_size_t	size,
	int		flags)
{
	mach_vm_offset_t	maddr;
	kern_return_t		result;

	maddr = *addr;
	result = mach_vm_allocate(map, &maddr, size, flags);
	*addr = CAST_DOWN_EXPLICIT(vm32_offset_t, maddr);

	return result;
}

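/*
 * The (start + size < start) test rejects ranges that wrap around the
 * 32-bit address space; the same check guards the other range-based
 * wrappers below.
 */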
kern_return_t
vm32_deallocate(
	vm_map_t	map,
	vm32_offset_t		start,
	vm32_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_deallocate(map, start, size);
}

kern_return_t
vm32_inherit(
	vm_map_t	map,
	vm32_offset_t		start,
	vm32_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_inherit(map, start, size, new_inheritance);
}

kern_return_t
vm32_protect(
	vm_map_t		map,
	vm32_offset_t		start,
	vm32_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_protect(map, start, size, set_maximum, new_protection);
}

kern_return_t
vm32_machine_attribute(
	vm_map_t	map,
	vm32_address_t	addr,
	vm32_size_t	size,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_machine_attribute(map, addr, size, attribute, value);
}

kern_return_t
vm32_read(
	vm_map_t		map,
	vm32_address_t		addr,
	vm32_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	return mach_vm_read(map, addr, size, data, data_size);
}

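/*
 * The read list is a fixed-size array of VM_MAP_ENTRY_MAX entries: widen
 * the whole 32-bit list element by element before the call, then truncate
 * the resulting addresses and sizes back on the way out.
 */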
kern_return_t
vm32_read_list(
	vm_map_t		map,
	vm32_read_entry_t	data_list,
	natural_t		count)
{
	mach_vm_read_entry_t	mdata_list;
	mach_msg_type_number_t	i;
	kern_return_t		result;

	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		mdata_list[i].address = data_list[i].address;
		mdata_list[i].size = data_list[i].size;
	}

	result = mach_vm_read_list(map, mdata_list, count);

	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		data_list[i].address = CAST_DOWN_EXPLICIT(vm32_address_t, mdata_list[i].address);
		data_list[i].size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_list[i].size);
	}

	return result;
}

kern_return_t
vm32_read_overwrite(
	vm_map_t	map,
	vm32_address_t	address,
	vm32_size_t	size,
	vm32_address_t	data,
	vm32_size_t	*data_size)
{
	kern_return_t	result;
	mach_vm_size_t	mdata_size;

	mdata_size = *data_size;
	result = mach_vm_read_overwrite(map, address, size, data, &mdata_size);
	*data_size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_size);

	return result;
}

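/*
 * vm32_write and vm32_copy return nothing that needs truncating, so they
 * pass straight through to the wide routines.
 */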
kern_return_t
vm32_write(
	vm_map_t			map,
	vm32_address_t			address,
	pointer_t			data,
	mach_msg_type_number_t	size)
{
	return mach_vm_write(map, address, data, size);
}

kern_return_t
vm32_copy(
	vm_map_t	map,
	vm32_address_t	source_address,
	vm32_size_t	size,
	vm32_address_t	dest_address)
{
	return mach_vm_copy(map, source_address, size, dest_address);
}

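/*
 * vm32_map_64 widens the in/out address, hands the request to mach_vm_map,
 * and truncates the chosen address on return. vm32_map simply forwards to
 * vm32_map_64, and vm32_remap follows the same widen/truncate shape against
 * mach_vm_remap.
 */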
kern_return_t
vm32_map_64(
	vm_map_t		target_map,
	vm32_offset_t		*address,
	vm32_size_t		size,
	vm32_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_offset_t	maddress;
	kern_return_t		result;

	maddress = *address;
	result = mach_vm_map(target_map, &maddress, size, mask,
						 flags, port, offset, copy,
						 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);

	return result;
}

kern_return_t
vm32_map(
	vm_map_t		target_map,
	vm32_offset_t		*address,
	vm32_size_t		size,
	vm32_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm32_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	return vm32_map_64(target_map, address, size, mask,
						  flags, port, offset, copy,
						  cur_protection, max_protection, inheritance);
}

kern_return_t
vm32_remap(
	vm_map_t		target_map,
	vm32_offset_t		*address,
	vm32_size_t		size,
	vm32_offset_t		mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	vm32_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_offset_t	maddress;
	kern_return_t		result;

	maddress = *address;
	result = mach_vm_remap(target_map, &maddress, size, mask,
						 anywhere, src_map, memory_address, copy,
						 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);

	return result;
}

kern_return_t
vm32_msync(
	vm_map_t	map,
	vm32_address_t	address,
	vm32_size_t	size,
	vm_sync_t	sync_flags)
{
	return mach_vm_msync(map, address, size, sync_flags);
}

kern_return_t
vm32_behavior_set(
	vm_map_t		map,
	vm32_offset_t		start,
	vm32_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	return mach_vm_behavior_set(map, start, size, new_behavior);
}

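/*
 * vm32_region_64 widens both the address and the size, calls
 * mach_vm_region, and truncates both values on the way back out.
 */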
kern_return_t
vm32_region_64(
	vm_map_t		map,
	vm32_offset_t		*address,		/* IN/OUT */
	vm32_size_t		*size,			/* OUT */
	vm_region_flavor_t	flavor,			/* IN */
	vm_region_info_t	info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	mach_port_t		*object_name)		/* OUT */
{
	mach_vm_offset_t	maddress;
	mach_vm_size_t		msize;
	kern_return_t		result;

	maddress = *address;
	msize = *size;
	result = mach_vm_region(map, &maddress, &msize, flavor, info, count, object_name);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);

	return result;
}

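/*
 * vm32_region calls vm_map_region directly and then fails the request if
 * the region it found extends beyond VM32_MAX_ADDRESS, since such a region
 * cannot be described by the truncated out parameters.
 */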
kern_return_t
vm32_region(
	vm_map_t			map,
	vm32_address_t			*address,	/* IN/OUT */
	vm32_size_t			*size,		/* OUT */
	vm_region_flavor_t		flavor,		/* IN */
	vm_region_info_t		info,		/* OUT */
	mach_msg_type_number_t		*count,		/* IN/OUT */
	mach_port_t			*object_name)	/* OUT */
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm32_region_recurse_64(
	vm_map_t			map,
	vm32_address_t			*address,
	vm32_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	mach_vm_address_t	maddress;
	mach_vm_size_t		msize;
	kern_return_t		result;

	maddress = *address;
	msize = *size;
	result = mach_vm_region_recurse(map, &maddress, &msize, depth, info, infoCnt);
	*address = CAST_DOWN_EXPLICIT(vm32_address_t, maddress);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);

	return result;
}

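/*
 * vm32_region_recurse has to convert the info structure as well: it asks
 * vm_map_region_recurse_64 for the 64-bit submap info and then copies each
 * field into the caller's 32-bit layout, truncating the object offset.
 */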
kern_return_t
vm32_region_recurse(
	vm_map_t			map,
	vm32_offset_t			*address,	/* IN/OUT */
	vm32_size_t			*size,		/* OUT */
	natural_t			*depth,		/* IN/OUT */
	vm_region_recurse_info_t	info32,		/* IN/OUT */
	mach_msg_type_number_t		*infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
				      depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

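/*
 * vm32_purgable_control and vm32_map_page_query round the given offset
 * down to a page boundary with vm_map_trunc_page before calling into the
 * map layer.
 */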
kern_return_t
vm32_purgable_control(
	vm_map_t		map,
	vm32_offset_t		address,
	vm_purgable_t		control,
	int			*state)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_purgable_control(map,
				       vm_map_trunc_page(address, PAGE_MASK),
				       control,
				       state);
}

kern_return_t
vm32_map_page_query(
	vm_map_t		map,
	vm32_offset_t		offset,
	int			*disposition,
	int			*ref_count)
{
	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition,
		ref_count);
}

kern_return_t
vm32_make_memory_entry_64(
	vm_map_t		target_map,
	memory_object_size_t	*size,
	memory_object_offset_t	offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_handle)
{
	// use the existing entrypoint
	return _mach_make_memory_entry(target_map, size, offset, permission, object_handle, parent_handle);
}

kern_return_t
vm32_make_memory_entry(
	vm_map_t		target_map,
	vm32_size_t		*size,
	vm32_offset_t		offset,
	vm_prot_t		permission,
	ipc_port_t		*object_handle,
	ipc_port_t		parent_entry)
{
	memory_object_size_t	mo_size;
	kern_return_t		kr;

	mo_size = (memory_object_size_t)*size;
	kr = _mach_make_memory_entry(target_map, &mo_size,
			(memory_object_offset_t)offset, permission, object_handle,
			parent_entry);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, mo_size);
	return kr;
}

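/*
 * vm32__task_wire does not touch existing mappings; it only sets or clears
 * the map's wiring_required flag, which governs whether future entries in
 * this map are wired.
 */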
kern_return_t
vm32__task_wire(
	vm_map_t	map,
	boolean_t	must_wire)
{
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (must_wire)
		map->wiring_required = TRUE;
	else
		map->wiring_required = FALSE;

	return(KERN_SUCCESS);
}

#endif /* VM32_SUPPORT */