/darwin-on-arm/xnu/osfmk/vm/
vm_page.h:
     90: * we allow for a protected page... they can be older if there is no
     97: * defines the amount of time a speculative page is normally
    137: * page, indexed by page number. Each structure
    150: * and offset to which this page belongs (for pageout),
    154: * object that the page belongs to (O) or by the lock on the page
    171: * by the "page queues" lock.
    180: /* boolean_t */ active:1, /* page is in active list (P) */
    181: inactive:1, /* page i… [all...]
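The vm_page.h hits above sketch the heart of the VM: one struct vm_page per physical page, recording the (object, offset) it backs plus state bits, with each field guarded either by the owning object's lock (O) or by the page queues lock (P). A minimal userspace analogue, with hypothetical field names and far fewer flags than the real struct:

#include <stdint.h>

/* Trimmed-down analogue of struct vm_page: queue links and state bits,
 * annotated with the lock that would guard each field.
 * (O) = lock of the owning VM object, (P) = page queues lock. */
struct vm_page_sketch {
    struct vm_page_sketch *next;    /* next page on a paging queue (P) */
    struct vm_page_sketch *prev;    /* previous page on a paging queue (P) */

    void     *object;               /* VM object the page belongs to (O) */
    uint64_t  offset;               /* offset into that object (O) */

    uint32_t  active:1,             /* page is on the active list (P) */
              inactive:1,           /* page is on the inactive list (P) */
              busy:1,               /* page in transit, don't touch (O) */
              dirty:1;              /* must be cleaned before reuse (O) */
    uint32_t  phys_page;            /* physical page number */
};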
vm_pageout.h:
    162: extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
    163: extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
    164: extern ppnum_t vm_page_get_phys_page(vm_page_t page);
    165: extern vm_page_t vm_page_get_next(vm_page_t page);
    176: * must hold the page queues lock to
    255: /* universal page list structure */
    369: /* wired page list structure */
    374: extern void vm_pageout_throttle_down(vm_page_t page);
    375: extern void vm_pageout_throttle_up(vm_page_t page);
    385: vm_page_t page, [all...]
vm_fault.c:
    120: * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a
    121: * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
    157: vm_page_t page,
    161: vm_page_t page);
    398: * so, compute a potential page to deactivate and
    403: * return TRUE if we actually deactivate a page
    572: * prevent us from creating a ZF page...
    623: * treat this as if we couldn't grab a page
    646: * do the work to zero fill a page and
    650: * page queu…
   4465: vm_fault_copy_cleanup( vm_page_t page, vm_page_t top_page) argument
   4483: vm_fault_copy_dst_cleanup( vm_page_t page) argument
   4951: vm_page_validate_cs_mapped( vm_page_t page, const void *kaddr) argument
   5035: vm_page_validate_cs( vm_page_t page) argument
    [all...]
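The throttling described at lines 120-121 is a plain retry-with-delay loop: a hard-throttled thread sleeps for HARD_THROTTLE_DELAY microseconds before re-attempting the fault. A toy userspace model of that control flow; the delay value and the stub fault path here are assumptions, not xnu's:

#include <stdio.h>
#include <unistd.h>

#define HARD_THROTTLE_DELAY_US 250   /* hypothetical; xnu defines its own value */

typedef enum { FAULT_SUCCESS, FAULT_THROTTLED } fault_result_t;

/* Stub fault path: pretend the first two attempts are hard-throttled. */
static fault_result_t try_demand_zero_fault(void)
{
    static int attempts;
    return (attempts++ < 2) ? FAULT_THROTTLED : FAULT_SUCCESS;
}

int main(void)
{
    int retries = 0;
    while (try_demand_zero_fault() == FAULT_THROTTLED) {
        retries++;
        usleep(HARD_THROTTLE_DELAY_US);  /* back off before retrying the fault */
    }
    printf("fault satisfied after %d throttled retries\n", retries);
    return 0;
}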
pmap.h:
     91: /* Copy between a physical page and a virtual address */
    142: * to allocate page frames.
    175: * physical page.
    231: extern void pmap_page_protect( /* Restrict access to page. */
    271: * page number sent */
    281: * Set (override) cache attributes for the specified physical page
    291: * the given physical page is mapped into no pmap.
    380: #define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
    383: vm_page_t __page = (page); \
    397: #define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protectio… [all...]
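Note how PMAP_ENTER at line 383 captures its page argument in a local __page before using it. That is standard macro hygiene: it keeps an argument expression with side effects from being evaluated more than once. A self-contained illustration of the difference, with hypothetical names:

#include <stdio.h>

static void consume(int v) { printf("got %d\n", v); }
static int next_page(void) { static int n; return n++; }  /* side-effecting argument */

/* Unsafe: evaluates its argument twice. */
#define USE_TWICE_BAD(p)  do { consume(p); consume(p); } while (0)

/* Safe, PMAP_ENTER-style: capture the argument once in a local. */
#define USE_TWICE_OK(p)                \
    do {                               \
        int __p = (p);                 \
        consume(__p); consume(__p);    \
    } while (0)

int main(void)
{
    USE_TWICE_BAD(next_page());  /* prints two different values: 0 then 1 */
    USE_TWICE_OK(next_page());   /* prints the same value twice: 2 and 2 */
    return 0;
}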
vm_pageout.c:
     63: * The proverbial page-out daemon.
    127: #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
    517: * Handle the "target" page(s). These pages are to be freed if
    522: * adjacent page and conversion to a target.
    534: * Revoke all access to the page. Since the object is
    535: * locked, and the page is busy, this prevents the page
    539: * Since the page is left "dirty" but "not modified", we
    540: * can detect whether the page was redirtied during
    565: * page, s…
   6703: vm_paging_map_object( vm_map_offset_t *address, vm_page_t page, vm_object_t object, vm_object_offset_t offset, vm_map_size_t *size, vm_prot_t protection, boolean_t can_unlock_object) argument
   7066: vm_page_encrypt( vm_page_t page, vm_map_offset_t kernel_mapping_offset) argument
   7205: vm_page_decrypt( vm_page_t page, vm_map_offset_t kernel_mapping_offset) argument
   7360: vm_page_t page; local
   7477: vm_page_encrypt( __unused vm_page_t page, __unused vm_map_offset_t kernel_mapping_offset) argument
   7484: vm_page_decrypt( __unused vm_page_t page, __unused vm_map_offset_t kernel_mapping_offset) argument
   7496: vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked) argument
   7892: vm_page_slide( vm_page_t page, vm_map_offset_t kernel_mapping_offset) argument
    [all...]
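Lines 534-540 describe a neat trick: before cleaning a page, the daemon records its dirtiness in the software "dirty" bit and clears the hardware "modified" bit, so that if the hardware bit is set again by the time the I/O completes, the page was redirtied during pageout. A toy model of that state machine, with assumed field names:

#include <stdbool.h>
#include <stdio.h>

struct page_state {
    bool sw_dirty;     /* software record of dirtiness ("dirty") */
    bool hw_modified;  /* hardware modified bit ("modified") */
};

/* Start a clean: move dirtiness into software state, clear the hardware bit. */
static void pageout_start(struct page_state *p)
{
    p->sw_dirty = p->sw_dirty || p->hw_modified;
    p->hw_modified = false;    /* any new write will set this again */
}

/* After the write completes: a set hardware bit means the page was redirtied. */
static bool pageout_finish(struct page_state *p)
{
    bool redirtied = p->hw_modified;
    if (!redirtied)
        p->sw_dirty = false;   /* clean for real */
    return redirtied;
}

int main(void)
{
    struct page_state p = { .hw_modified = true };
    pageout_start(&p);
    p.hw_modified = true;      /* simulate a write racing with the pageout */
    printf("redirtied: %d\n", pageout_finish(&p));  /* prints 1 */
    return 0;
}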
memory_object.c:
    121: * Determine whether the given page should be returned,
    122: * based on the page's state and on the given return policy.
    124: * We should return the page if one of the following is true:
    159: * given page. See the description of
    164: * completed, blocked, or that the page must
    175: "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
    192: * dump the page, pager wants us to
    204: * The page is wired... just clean or return the page if needed.
    216: * need to return the page
    756: vm_page_t page; local
    [all...]
vm_object.h:
    228: /* Allow full page overwrite
    230: * page is absent */
    232: /* Instead of sending page
    235: * wants to discard it, page
    248: * page cleaning during
    265: * zero filled page.
    307: vm_offset_t cow_hint; /* last page present in */
    357: #define VM_PAGE_REMOVE(page) \
    359: vm_page_t __page = (page); \
    377: #define VM_PAGE_INSERT(page, objec… [all...]
vm_object.c:
    104: * page of memory exists within exactly one object.
   1337: * break the page scan into 2 pieces to minimize the time spent
   1338: * behind the page queue lock...
   1384: * so if we get here, this page can't be on the pageout queue
   1397: * Add this page to our list of reclaimed pages,
   1440: * put the page queues lock back to the caller's
   1732: /* Must take page lock for this - using it to protect token queue */
   1867: * hogging the page queue lock too long
   1905: * zero-fill this page since we're conceptually
   1916: * can't purge a wired page
   6420: vm_page_t page; local
    [all...]
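The hits at lines 1337-1338 and 1867 are about bounding lock hold time: a long scan is broken up so the page queues lock is dropped and retaken periodically instead of being held for the whole pass. A generic userspace sketch of that pattern; the batch size and names are placeholders:

#include <stdio.h>
#include <pthread.h>
#include <sched.h>

#define SCAN_BATCH 64                 /* placeholder batch size */

static pthread_mutex_t page_queues_lock = PTHREAD_MUTEX_INITIALIZER;

/* Visit n items under the lock, but release and retake it every SCAN_BATCH
 * items so other threads are not starved - the idea behind breaking the
 * page scan into pieces. */
static void scan_in_batches(int n, void (*visit)(int))
{
    pthread_mutex_lock(&page_queues_lock);
    for (int i = 0; i < n; i++) {
        visit(i);
        if ((i + 1) % SCAN_BATCH == 0) {
            pthread_mutex_unlock(&page_queues_lock);
            sched_yield();            /* give waiters a chance */
            pthread_mutex_lock(&page_queues_lock);
        }
    }
    pthread_mutex_unlock(&page_queues_lock);
}

static void count_page(int i) { (void)i; }

int main(void)
{
    scan_in_batches(1000, count_page);
    puts("scan done");
    return 0;
}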
vm_resident.c:
    122: static void vm_page_free_prepare(vm_page_t page);
    129: * Associated with each page of user-allocatable memory is a
    130: * page structure.
    145: * (virtual memory object, offset) to page lookup, employs
    208: * The virtual page size is currently implemented as a runtime
    214: * All references to the virtual page size outside this
    223: * Resident page structures are initialized from
    254: * resident page structures that do not refer to
    255: * real pages, for example to leave a page with
    258: * These page structure…
   4459: vm_page_set_offset(vm_page_t page, vm_object_offset_t offset) argument
   4465: vm_page_get_next(vm_page_t page) argument
   4471: vm_page_get_offset(vm_page_t page) argument
   4477: vm_page_get_phys_page(vm_page_t page) argument
    [all...]
/darwin-on-arm/xnu/bsd/dev/dtrace/
dtrace_ptss.c:
     47: * page of memory, the underlying kernel _MALLOC may block.
     59: // Nothing on the free list. Allocate a new page, it's okay if multiple threads race here.
     60: struct dtrace_ptss_page* page = dtrace_ptss_allocate_page(p); local
     62: // Make sure we actually got a page
     63: if (page == NULL)
     66: // Add the page to the page list
     67: page->next = p->p_dtrace_ptss_pages;
     68: p->p_dtrace_ptss_pages = page;
     72: page… [all...]
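The dtrace_ptss.c hits show the usual pattern for refilling a free list with a blocking allocator: allocate the new page outside any critical section (the comment notes the race between threads is benign), check the result, then link it onto the process's page list. A simplified userspace rendering; the locking and types here are assumptions, since the real code works against struct proc:

#include <stdlib.h>
#include <pthread.h>

struct ptss_page { struct ptss_page *next; /* ...per-page bookkeeping... */ };

static struct ptss_page *page_list;                  /* head of the page list */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocation may block, so do it outside the lock; if several threads race,
 * each successfully allocated page still gets linked in, so nothing leaks. */
static struct ptss_page *ptss_allocate_and_link(void)
{
    struct ptss_page *page = malloc(sizeof(*page));  /* stand-in for the kernel allocator */
    if (page == NULL)
        return NULL;                                 /* make sure we actually got a page */

    pthread_mutex_lock(&list_lock);
    page->next = page_list;                          /* add the page to the page list */
    page_list = page;
    pthread_mutex_unlock(&list_lock);
    return page;
}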
/darwin-on-arm/xnu/iokit/Kernel/
IOHibernateRestoreKernel.c:
    234: hibernate_page_bitmap(hibernate_page_list_t * list, uint32_t page) argument
    241: if ((page >= bitmap->first_page) && (page <= bitmap->last_page))
    254: uint32_t bank, page = *pPage; local
    259: if (page <= bitmap->first_page)
    264: if (page <= bitmap->last_page)
    275: hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page) argument
    279: bitmap = hibernate_page_bitmap(list, page);
    282: page -= bitmap->first_page;
    284: bitmap->bitmap[page >>…
    293: hibernate_page_bittst(hibernate_page_list_t * list, uint32_t page) argument
    309: hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t page) argument
    426: uint32_t page; local
    [all...]
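The restore-time bitmap routines follow a two-step scheme visible in the hits: find the bank (hibernate_bitmap_t) whose [first_page, last_page] range contains the page, rebase the page number against first_page, then index 32-bit words with page >> 5. A compact model of that scheme; the word size matches the snippet, but the bit order within a word is my assumption:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    uint32_t first_page, last_page;
    uint32_t bitmap[8];              /* 1 bit per page in [first_page, last_page] */
} bank_bitmap_t;

/* Find the bank whose range contains 'page', as hibernate_page_bitmap does. */
static bank_bitmap_t *find_bank(bank_bitmap_t *banks, int nbanks, uint32_t page)
{
    for (int i = 0; i < nbanks; i++)
        if (page >= banks[i].first_page && page <= banks[i].last_page)
            return &banks[i];
    return NULL;
}

/* Set or clear one page's bit: rebase to the bank, then index word page >> 5. */
static void page_bitset(bank_bitmap_t *b, uint32_t page, bool set)
{
    page -= b->first_page;
    if (set) b->bitmap[page >> 5] |=  (1u << (page & 31));
    else     b->bitmap[page >> 5] &= ~(1u << (page & 31));
}

static bool page_bittst(bank_bitmap_t *b, uint32_t page)
{
    page -= b->first_page;
    return (b->bitmap[page >> 5] & (1u << (page & 31))) != 0;
}

int main(void)
{
    bank_bitmap_t banks[1] = {{ .first_page = 0x100, .last_page = 0x1ff }};
    bank_bitmap_t *b = find_bank(banks, 1, 0x123);
    page_bitset(b, 0x123, true);
    printf("bit for page 0x123: %d\n", page_bittst(b, 0x123));  /* prints 1 */
    return 0;
}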
IOHibernateIO.cpp:
     43: hibernate_setup() in osfmk is called to allocate page bitmaps for all dram, and
     44: page out any pages it wants to (currently zero, but probably some percentage of memory).
     67: Each wired page is compressed and written and then each non-wired page. Compression and
     83: - hibernate_kernel_entrypoint() removes pages occupied by the raw image from the page bitmaps.
    185: static uint32_t gIOHibernateFreeRatio = 0; // free page target (percent)
    344: // free page
    359: uint32_t page = *pPage; local
    363: while ((bitmap = hibernate_page_bitmap_pin(list, &page)))
    365: count = hibernate_page_bitmap_count(bitmap, TRUE, page);
   2055: vm_offset_t ppnum, page; local
   2819: unsigned int page; local
    [all...]
IOMapper.cpp:
    264: ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page) argument
    268: IOMapper::gSystem->iovmInsert(addr, (IOItemCount) offset, page);
    272: return page;
IOMemoryDescriptor.cpp:
    145: vm_address_t fPageInfo; // Pointer to page list or index into it
    147: ppnum_t fMappedPage; // Page number of first page in this iopl
    148: unsigned int fPageOffset; // Offset within first page of iopl
    767: // Pre-compute the offset into the UPL's page list
   1012: return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
   1459: // Now add the iopl 1st page offset.
   1531: // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
   1928: ppnum_t page, end;
   1930: page = atop_64(round_page_64(pa));
   1932: for (; page < en… [all...]
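Line 1930's page = atop_64(round_page_64(pa)) converts a physical address to the number of the first whole page at or above it; the truncated for loop at line 1932 then walks page numbers up to some end. A self-contained sketch of that walk; the choice of end is my guess, since the snippet cuts off:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (PAGE_SIZE - 1)

#define round_page_64(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_page_64(x) ((x) & ~PAGE_MASK)
#define atop_64(x)       ((x) >> PAGE_SHIFT)   /* address to page number */

int main(void)
{
    uint64_t pa = 0x1234, len = 0x5000;

    /* Walk the page numbers fully contained in [pa, pa+len): start at the
     * first whole page at or after pa, stop before the page holding pa+len. */
    uint64_t page = atop_64(round_page_64(pa));
    uint64_t end  = atop_64(trunc_page_64(pa + len));
    for (; page < end; page++)
        printf("page %llu\n", (unsigned long long)page);
    return 0;
}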
/darwin-on-arm/xnu/iokit/IOKit/
IOMapper.h:
     42: ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page);
    110: virtual void iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page) = 0;
IOHibernatePrivate.h:
    364: hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page);
    367: hibernate_page_bittst(hibernate_page_list_t * list, uint32_t page);
    370: hibernate_page_bitmap_pin(hibernate_page_list_t * list, uint32_t * page);
    373: hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t page);
/darwin-on-arm/xnu/osfmk/default_pager/
default_pager_internal.h:
    231: unsigned int gs_pages_init; /* # page init requests */
    232: unsigned int gs_pages_init_writes; /* # page init writes */
    289: unsigned int bs_pages_in; /* # page read requests */
    290: unsigned int bs_pages_in_fail; /* # page read errors */
    291: unsigned int bs_pages_out; /* # page write requests */
    292: unsigned int bs_pages_out_fail; /* # page write errors */
    454: #define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page)))
    455: #define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page)))
    [all...]
/darwin-on-arm/xnu/osfmk/arm/
pmap.c:
    156: * page, one mapping exists for each page. Pages that are mapped in
    159: * go through the leaf mappings if accessing/modifying page entries.
    250: * valid virtual mappings of that page. An entry is
    268: physical page to (potentially many) virtual addresses within pmaps. In the previous
    277: An initial array of these is created at boot time, one per physical page of memory,
    278: indexed by the physical page number. Additionally, a pool of entries is created from a
    292: for (every page/pte in the space) {
    319: every physical page of memory. The hashed pve's are larger due to the addition of the hash
    350: * OS level page bit
   2230: uint32_t page; local
   2441: vm_page_t page; local
   2771: vm_page_t page = pmap_grab_page(map); local
    [all...]
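The pmap.c commentary describes the classic pv (physical-to-virtual) scheme: an array with one entry head per physical page, created at boot and indexed by physical page number, each chaining every (pmap, virtual address) pair currently mapping that page. A minimal model with hypothetical field names:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* One pv entry per (pmap, virtual address) mapping of a physical page;
 * entries for the same physical page are chained together. */
typedef struct pv_entry {
    struct pv_entry *next;
    void            *pmap;    /* address space holding the mapping */
    uintptr_t        va;      /* virtual address of the mapping */
} pv_entry_t;

/* One chain head per physical page, indexed by physical page number,
 * mirroring the boot-time array the comments describe. */
#define NPHYSPAGES 1024
static pv_entry_t *pv_head_table[NPHYSPAGES];

/* Walk every virtual mapping of one physical page - the traversal that
 * lets pmap_page_protect reach all mappings of a page. */
static void pv_walk(uint32_t pn, void (*fn)(void *pmap, uintptr_t va))
{
    for (pv_entry_t *pv = pv_head_table[pn]; pv != NULL; pv = pv->next)
        fn(pv->pmap, pv->va);
}

static void show(void *pmap, uintptr_t va)
{
    printf("pmap %p maps va 0x%lx\n", pmap, (unsigned long)va);
}

int main(void)
{
    pv_entry_t e = { .next = NULL, .pmap = (void *)0x1, .va = 0x40001000UL };
    pv_head_table[42] = &e;
    pv_walk(42, show);
    return 0;
}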
/darwin-on-arm/xnu/osfmk/arm/armv/
cpufunc-v6.s:
     80: add r0, r0, #0x1000 @ page size
cpufunc-v7.s:
     96: add r0, r0, #0x1000 @ page size
/darwin-on-arm/xnu/bsd/kern/
ubc_subr.c:
    178: uint32_t base; // first page number
    199: uint8_t pageSize; /* log2(page size in bytes); 0 => infinite */
    267: * Locating a page hash
    272: unsigned page,
    312: /* this scatter beyond page we're looking for? */
    313: if (sbase > page) {
    317: if (sbase+scount >= page) {
    319: * referencing our page */
    335: /* this scatter struct is before the page we're looking
    341: hash = base + (page…
    270: hashes( const CS_CodeDirectory *cd, unsigned page, char *lower_bound, char *upper_bound) argument
    [all...]
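The hashes() hits show how a page's code-signing hash is located when the code directory uses a scatter vector: scan entries, each covering count pages starting at file page base, until one contains the page, then index into the hash array. A simplified lookup under assumed field names; the real CS_CodeDirectory layout has more machinery, including the bounds checks against lower_bound/upper_bound:

#include <stdint.h>
#include <stddef.h>

#define HASH_LEN 20   /* e.g. SHA-1; the real directory records its hash length */

typedef struct {
    uint32_t count;   /* pages covered by this entry; 0 terminates the vector */
    uint32_t base;    /* first file page covered */
    uint32_t index;   /* index of this entry's first hash in the hash array */
} scatter_t;

/* Find the hash slot for 'page', in the spirit of the hashes() lookup:
 * scan scatter entries until one's [base, base+count) contains the page. */
static const uint8_t *
find_page_hash(const scatter_t *sc, const uint8_t *hashes, unsigned page)
{
    for (; sc->count != 0; sc++) {
        uint32_t sbase = sc->base, scount = sc->count;
        if (sbase > page)
            return NULL;               /* scatter beyond our page: no hash */
        if (page < sbase + scount)     /* this scatter covers our page */
            return hashes + (size_t)(sc->index + (page - sbase)) * HASH_LEN;
    }
    return NULL;
}

The early return on sbase > page assumes the vector is sorted by base, which the kernel's own comment ("this scatter beyond page we're looking for?") implies.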
uipc_mbuf.c:
    291: * get populated when the corresponding cluster is created. Because a page
    293: * mbufs so that there is a 1-to-1 mapping between them. A page that never
    296: * page is allocated and used for the entire object.
    377: * Each slab controls a page of memory.
    391: #define SLF_MAPPED 0x0001 /* backed by a mapped page */
   1419: * This yields mcl_audit_t units, each one representing a page.
   2613: vm_offset_t page = 0; local
   2626: * the physical page(s), our view of the cluster map may no longer
   2629: * operation (including the page mapping) is serialized.
   2652: page… [all...]
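Lines 291-296 and 377 state the page/slab invariant: one slab per page of the cluster map, so the slab owning any object can be computed directly from its address. A short sketch of that mapping; mbutl and slab_get loosely echo the file's naming, but treat the details as assumptions:

#include <stdint.h>

#define PAGE_SHIFT 12

typedef struct mcl_slab { int refcnt; /* ...per-page bookkeeping... */ } mcl_slab_t;

static mcl_slab_t *slab_table;   /* one slab per page of the cluster map */
static uintptr_t   mbutl;        /* base address of the cluster map */

/* Each slab controls exactly one page, so the slab for any object in the
 * map is found by dividing the address offset by the page size. */
static mcl_slab_t *slab_get(void *obj)
{
    return &slab_table[((uintptr_t)obj - mbutl) >> PAGE_SHIFT];
}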
/darwin-on-arm/xnu/osfmk/i386/
cpuid.c:
     83: SMALL, /* Small page TLB */
     84: LARGE, /* Large page TLB */
     85: BOTH /* Small and Large page TLB */
    387: /* Compute the number of page colors for this cache,
    394: * To VM, it is composed of a page offset, a page
    406: * by the page offset.
    457: int page; local
    466: page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
    488: info_p->cpuid_tlb[id][page][leve… [all...]
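The comment at lines 387-406 computes page colors: each way of the cache is indexed by (size / associativity) bytes; the low bits of that index are supplied by the page offset, and whatever index bits remain above the page offset are the color bits. In code, with a worked example:

#include <stdio.h>

/* Number of page colors for a physically-indexed cache:
 * colors = (size / associativity) / page_size, but never less than 1. */
static unsigned page_colors(unsigned size, unsigned assoc, unsigned page_size)
{
    unsigned span = size / assoc;            /* bytes indexed within one way */
    return (span > page_size) ? span / page_size : 1;
}

int main(void)
{
    /* A 256 KB 8-way cache with 4 KB pages: (262144/8)/4096 = 8 colors. */
    printf("%u colors\n", page_colors(256 * 1024, 8, 4096));
    return 0;
}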
pmap.c:
    215: * Array of physical page attributes for managed pages.
    216: * One byte per physical page.
    289: * for 64 bit, causes the pdpt page containing the pde entry to be mapped,
    290: * then returns the mapped address of the pde entry in that page
    307: * the single pml4 page per pmap is allocated at pmap create time and exists
    308: * for the duration of the pmap. we allocate this page in kernel vm (to save us one
    309: * level of page table dynamic mapping).
    310: * this returns the address of the requested pml4 entry in the top level page.
    320: * maps in the pml4 page, if any, containing the pdpt entry requested
    321: * and returns the address of the pdpt entry in that mapped page
   1758: uint32_t page; local
    [all...]
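The pml4/pdpt/pde helpers described at lines 289-321 all hinge on the x86-64 four-level split of a virtual address: 9 index bits per table level plus a 12-bit page offset. A standalone decoder for that split:

#include <stdint.h>
#include <stdio.h>

/* x86-64 4-level paging: a 48-bit virtual address selects one entry in each
 * table with 9 index bits per level, leaving a 12-bit page offset. */
#define PML4_INDEX(va)  (((va) >> 39) & 0x1ff)
#define PDPT_INDEX(va)  (((va) >> 30) & 0x1ff)
#define PDE_INDEX(va)   (((va) >> 21) & 0x1ff)
#define PTE_INDEX(va)   (((va) >> 12) & 0x1ff)

int main(void)
{
    uint64_t va = 0x00007f8012345678ULL;
    printf("pml4=%llu pdpt=%llu pde=%llu pte=%llu offset=0x%llx\n",
           (unsigned long long)PML4_INDEX(va),
           (unsigned long long)PDPT_INDEX(va),
           (unsigned long long)PDE_INDEX(va),
           (unsigned long long)PTE_INDEX(va),
           (unsigned long long)(va & 0xfff));
    return 0;
}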
/darwin-on-arm/xnu/osfmk/x86_64/
pmap.c:
    199: * Array of physical page attributes for managed pages.
    200: * One byte per physical page.
    207: * page-directory entry.
    301: * Ensure global page feature is disabled at this point.
    332: * Map the kernel's code and data, and allocate the system page table.
    380: * Reserve some special page table entries/VA space for temporary
    607: /* The anchor is required to be page aligned. Zone debugging adds
    684: * 3) read and write-protect page zero (for K32)
    685: * 4) map the global page at the appropriate virtual address.
    740: * There's also a size miscalculation here: pend is one page les…
   1336: uint32_t page; local
    [all...]