/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>	/* NOTE(review): duplicate of the include above; harmless (header is guarded) */
#include <vm/vm_protos.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */



/* until component support available */

/*
 * Dispatch table ("vtable") for the device pager.  The generic
 * memory-object layer calls through these entry points; the order of
 * the function pointers must match "struct memory_object_pager_ops".
 */
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_map,
	device_pager_last_unmap,
	NULL, /* data_reclaim */
	"device pager"
};

/* Opaque handle identifying the underlying device to device_data_action() */
typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	struct ipc_object_header pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t pager_ops;	/* == &device_pager_ops */
	unsigned int		ref_count;	/* reference count (manipulated with hw_atomic_add/sub) */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;		/* size of the backed region, set at setup time */
	int			flags;		/* DEVICE_PAGER_* flags + WIMG cache-mode bits */
} *device_pager_t;

/* The kobject type field lives in the fake IPC header above */
#define pager_ikot pager_header.io_bits


device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

/* Zone from which all device_pager structures are allocated */
zone_t	device_pager_zone;


#define	DEVICE_PAGER_NULL	((device_pager_t) 0)


/* Upper bound on simultaneously allocated device pager structures */
#define	MAX_DNODE		10000





/*
 * device_pager_bootstrap:
 *
 * One-time initialization: create the zone that backs all
 * device_pager allocations.  The zone is exempted from caller
 * accounting (Z_CALLERACCT FALSE).
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t      size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				PAGE_SIZE, "device node pager structures");
	zone_change(device_pager_zone, Z_CALLERACCT, FALSE);
	return;
}

/*
 * device_pager_setup:
 *
 * Create a new device pager for the given device handle.  The new
 * pager is returned as an opaque memory_object_t; "size" and "flags"
 * are recorded for later use by device_pager_init().  Panics if the
 * zone allocation fails.  The "device" argument is unused.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t	device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return((memory_object_t)device_object);
}

/*
 * device_pager_populate_object:
 *
 * Install the device's physical page(s) starting at "page_num" into
 * the VM object backing this pager, at "offset" for "size" bytes.
 * For non-physically-contiguous objects, a UPL is created over the
 * populated range and immediately committed so the pages become
 * visible to the VM system.  Returns KERN_FAILURE if the pager or its
 * VM object cannot be found, otherwise the result of
 * vm_object_populate_with_private().
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if(device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if(vm_object == NULL)
		return KERN_FAILURE;

	kr =  vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if(kr != KERN_SUCCESS)
		return kr;

	if(!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);	/* no truncation on the cast below */
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset,
				(upl_size_t) size, &upl,  NULL,
				&null_size,
				(UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));
		if(kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}


	return kr;
}

/*
 * device_pager_lookup:
 *
 * Convert an opaque memory_object_t back to a device_pager_t.  This
 * is a simple cast; the assert verifies that the object really is a
 * device pager by checking its pager_ops pointer.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager_ops == &device_pager_ops);
	return (device_object);
}

/*
 * device_pager_init:
 *
 * Called by the memory-object layer to attach this pager to a VM
 * object via "control".  Takes a reference on the control handle,
 * marks the VM object private, propagates the contiguity / cache
 * flags recorded at setup time (including WIMG bits), and finally
 * sets the object's attributes (copy-delay strategy, non-cacheable,
 * temporary).  Panics if the attribute change fails.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t   device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;


/* The following settings should be done through an expanded change */
/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 * device_pager_data_return:
 *
 * Pageout path: forward the range to the device driver via
 * device_data_action() with read/write protection.  The residual
 * offset / error / dirty bookkeeping arguments are ignored.  Panics
 * if the pager lookup fails.
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				  (ipc_port_t) device_object,
				  VM_PROT_READ | VM_PROT_WRITE,
				  offset, data_cnt);
}

/*
 * device_pager_data_request:
 *
 * Pagein path: ask the device driver to supply the requested range
 * via device_data_action() with read protection.  The driver's return
 * value is discarded and KERN_SUCCESS is always returned to the VM
 * layer.  Panics if the pager lookup fails.
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			   (ipc_port_t) device_object,
			   VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}

/*
 * device_pager_reference:
 *
 * Atomically take an additional reference on the pager.  The assert
 * enforces that the caller already held a reference (the count must
 * have been at least 1 before the increment).
 */
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * device_pager_deallocate:
 *
 * Atomically drop a reference.  On the last release: close the device
 * handle, release the reference held on the memory object control
 * (the VM object must already have been disconnected from the pager),
 * and free the structure back to its zone.
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}

/*
 * device_pager_data_initialize:
 *
 * Unsupported for device-backed objects; always panics.
 */
kern_return_t
device_pager_data_initialize(
        __unused memory_object_t		mem_obj,
        __unused memory_object_offset_t	offset,
        __unused memory_object_cluster_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

/*
 * device_pager_data_unlock:
 *
 * Unsupported for device-backed objects; always fails.
 */
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t		mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * device_pager_terminate:
 *
 * Nothing to do at termination; cleanup happens in
 * device_pager_deallocate() when the last reference is dropped.
 */
kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}



/*
 * device_pager_synchronize:
 *
 * msync support: device memory has no dirty state to flush, so the
 * sync is reported complete immediately.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t		length,
	__unused vm_sync_t		sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * device_pager_map:
 *
 * Mapping a device object requires no extra work.
 */
kern_return_t
device_pager_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	return KERN_SUCCESS;
}

/*
 * device_pager_last_unmap:
 *
 * No action needed when the last mapping goes away.
 */
kern_return_t
device_pager_last_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}



/*
 * device_object_create:
 *
 * Allocate and initialize a device_pager structure from the zone:
 * install the ops vtable, tag the fake IPC header as a memory-object
 * kobject, and start with a single reference.  The device handle,
 * size and flags are filled in later by device_pager_setup().
 * Returns DEVICE_PAGER_NULL if the zone allocation fails.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t  device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return(DEVICE_PAGER_NULL);
	device_object->pager_ops = &device_pager_ops;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;


	return(device_object);
}
