Searched refs:page (Results 1 - 25 of 39) sorted by relevance

/macosx-10.5.8/xnu-1228.15.4/osfmk/vm/
vm_page.h
92 * we allow for a protected page... they can be older if there is no
99 * defines the amount of time a speculative page is normally
138 * page, indexed by page number. Each structure
151 * and offset to which this page belongs (for pageout),
155 * object that the page belongs to (O) or by the lock on the page
171 * by the "page queues" lock.
174 /* boolean_t */ inactive:1, /* page is in inactive list (P) */
175 active:1, /* page is in active list (P) */
[all...]
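
The vm_page.h excerpt above shows the file's idiom for page state: one structure per physical page, single-bit flags, and a lock annotation on every field, where (O) means guarded by the owning object's lock and (P) means guarded by the "page queues" lock. A minimal sketch of that layout; the fields beyond inactive/active are illustrative, not the real structure:

    #include <stdint.h>

    /* Illustrative only: a cut-down vm_page-like structure. The real
     * vm_page has many more fields. (O) = guarded by the owning
     * object's lock, (P) = guarded by the "page queues" lock. */
    struct toy_vm_page {
        struct toy_vm_page *next;    /* next page on its queue (P) */
        void      *object;           /* object the page belongs to (O) */
        uint64_t   offset;           /* offset into that object (O) */
        unsigned   inactive:1,       /* page is in inactive list (P) */
                   active:1,         /* page is in active list (P) */
                   busy:1,           /* page is being moved/filled (O) */
                   dirty:1;          /* page must be cleaned first (O) */
    };
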
vm_pageout.h
128 /* universal page list structure */
208 /* wired page list structure */
216 extern void vm_pageout_throttle_down(vm_page_t page);
217 extern void vm_pageout_throttle_up(vm_page_t page);
227 vm_page_t page,
231 vm_page_t page,
235 vm_page_t page,
vm_fault.c
138 vm_page_t page,
142 vm_page_t page);
362 * so, compute a potential page to deactivate and
367 * return TRUE if we actually deactivate a page
448 * prevent us from creating a ZF page...
493 * treat this as if we couldn't grab a page
508 * do the work to zero fill a page and
512 * page queue lock must NOT be held
520 * This is a zero-fill page fault...
522 * Checking the page loc
3769 vm_fault_copy_cleanup( vm_page_t page, vm_page_t top_page) argument
3785 vm_fault_copy_dst_cleanup( vm_page_t page) argument
4199 vm_page_validate_cs_mapped( vm_page_t page, const void *kaddr) argument
4282 vm_page_validate_cs( vm_page_t page) argument
[all...]
vm_pageout.c
63 * The proverbial page-out daemon.
120 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
141 #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
244 * must hold the page queues lock to
455 /* page. When the bit is on the upl commit code will */
456 /* respect the pageout bit in the target page over the */
457 /* caller's page list indication */
473 * Handle the "target" page(s). These pages are to be freed if
478 * adjacent page and conversion to a target.
490 * Revoke all access to the page
5330 vm_paging_map_object( vm_map_offset_t *address, vm_page_t page, vm_object_t object, vm_object_offset_t offset, vm_map_size_t *size, vm_prot_t protection, boolean_t can_unlock_object) argument
5701 vm_page_encrypt( vm_page_t page, vm_map_offset_t kernel_mapping_offset) argument
5839 vm_page_decrypt( vm_page_t page, vm_map_offset_t kernel_mapping_offset) argument
5991 vm_page_t page; local
6087 vm_page_encrypt( __unused vm_page_t page, __unused vm_map_offset_t kernel_mapping_offset) argument
6094 vm_page_decrypt( __unused vm_page_t page, __unused vm_map_offset_t kernel_mapping_offset) argument
[all...]
pmap.h
91 /* Copy between a physical page and a virtual address */
142 * to allocate page frames.
174 * physical page.
218 extern void pmap_page_protect( /* Restrict access to page. */
258 * page number sent */
272 * the given physical page is mapped into no pmap.
361 #define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
364 vm_page_t __page = (page); \
370 panic("VM page %p should not have an error\n", \
vm_object.h
198 /* Allow full page overwrite
200 * page is absent */
202 /* Instead of sending page
205 * wants to discard it, page
218 * page cleaning during
235 * zero filled page.
277 vm_offset_t cow_hint; /* last page present in */
313 #define VM_PAGE_REMOVE(page) \
315 vm_page_t __page = (page); \
333 #define VM_PAGE_INSERT(page, object) \
[all...]
memory_object.c
120 * Determine whether the given page should be returned,
121 * based on the page's state and on the given return policy.
123 * We should return the page if one of the following is true:
158 * given page. See the description of
163 * completed, blocked, or that the page must
174 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
178 * If we cannot change access to the page,
180 * (busy page) or because a mapping has been
194 /* dump the page, pager wants us to */
209 * If the page i
764 vm_page_t page; local
[all...]
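
The conditions that follow "one of the following is true:" are cut off in the excerpt above. As a hedged sketch of the shape of such a policy check: the policy names mirror Mach's MEMORY_OBJECT_RETURN_* concepts, but this exact predicate is a simplified assumption, not the kernel's code:

    /* Sketch of a "should this page be returned?" predicate. The real
     * memory_object.c check also consults pmap modify state and the
     * page's precious bit; treat this as a simplified assumption. */
    typedef enum {
        RETURN_NONE,                 /* never hand pages back */
        RETURN_DIRTY,                /* only modified pages */
        RETURN_ALL,                  /* modified or precious pages */
        RETURN_ANYTHING              /* caller takes everything */
    } toy_return_policy_t;

    static int
    toy_should_return_page(int dirty, int precious, toy_return_policy_t pol)
    {
        switch (pol) {
        case RETURN_NONE:     return 0;
        case RETURN_DIRTY:    return dirty;
        case RETURN_ALL:      return dirty || precious;
        case RETURN_ANYTHING: return 1;
        }
        return 0;
    }
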
vm_object.c
101 * page of memory exists within exactly one object.
1153 * the page queues lock too long.
1197 * leaves the page structure around for
1222 vm_pageout_cluster(p); /* flush page */
1231 * Add this page to our list of reclaimed pages,
1407 /* Must take page lock for this - using it to protect token queue */
1434 * hogging the page queue lock too long
1938 /* page count then we should iterate over pages otherwise */
1939 /* inverse page look-up will be faster */
2160 * lock -- the busy page wil
5685 vm_page_t page; local
[all...]
/macosx-10.5.8/xnu-1228.15.4/iokit/Kernel/
IOHibernateRestoreKernel.c
111 hibernate_page_bitmap(hibernate_page_list_t * list, uint32_t page) argument
118 if ((page >= bitmap->first_page) && (page <= bitmap->last_page))
131 uint32_t bank, page = *pPage; local
136 if (page <= bitmap->first_page)
141 if (page <= bitmap->last_page)
152 hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page) argument
156 bitmap = hibernate_page_bitmap(list, page);
159 page -= bitmap->first_page;
161 bitmap->bitmap[page >>
170 hibernate_page_bittst(hibernate_page_list_t * list, uint32_t page) argument
186 hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t page) argument
298 uint32_t page; local
[all...]
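
In the excerpt above, hibernate_page_bitmap locates the bank whose [first_page, last_page] range covers a page, and hibernate_page_bitset then rebases the page number into that bank and flips one bit. A self-contained sketch of that logic; the >> 5 word index and the high-bit-first mask are assumptions consistent with the truncated "bitmap->bitmap[page >>" line:

    #include <stdint.h>

    /* Sketch: set or clear one page's bit inside a bank covering
     * [first_page, last_page]. */
    typedef struct {
        uint32_t first_page;         /* first page this bank covers */
        uint32_t last_page;          /* last page this bank covers */
        uint32_t bitmap[];           /* one bit per page in the bank */
    } toy_bitmap_t;

    static void
    toy_page_bitset(toy_bitmap_t *b, int set, uint32_t page)
    {
        if (page < b->first_page || page > b->last_page)
            return;                  /* page not covered by this bank */
        page -= b->first_page;       /* rebase to the bank's range */
        uint32_t mask = 0x80000000u >> (page & 31);
        if (set)
            b->bitmap[page >> 5] |= mask;
        else
            b->bitmap[page >> 5] &= ~mask;
    }
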
IOCopyMapper.cpp
89 /* uint */ fPPNum :31; // ppnum_t page of translation
90 #define ACTIVEDARTENTRY(page) { true, page }
94 /* uint */ fPPNum :31, // ppnum_t page of translation
96 #define ACTIVEDARTENTRY(page) { page, true }
203 // Zero is never a valid page to return
257 // ret is free list offset not page offset;
384 void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page) argument
387 addr += offset; // Add the offset page to the base address
[all...]
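
The two fPPNum declarations and the two ACTIVEDARTENTRY variants above are the big-endian and little-endian layouts of the same 32-bit DART entry: a valid bit plus a 31-bit physical page number, with brace-initializer order following field order. A hedged sketch of the pattern; the fValid name and the endian test macro are assumptions, as the excerpt shows only fPPNum and the boolean:

    #include <stdint.h>

    /* Sketch: a 32-bit DART translation entry. Bitfield order (and so
     * brace-initializer order) flips with byte order, which is why the
     * excerpt defines ACTIVEDARTENTRY twice. */
    typedef struct {
    #if defined(__BIG_ENDIAN__)
        uint32_t fValid :1;          /* valid bit occupies the high bit */
        uint32_t fPPNum :31;         /* ppnum_t page of translation */
    #else
        uint32_t fPPNum :31;         /* page number in the low bits */
        uint32_t fValid :1;
    #endif
    } toy_dart_entry;

    /* Initializer order must follow field order: */
    #if defined(__BIG_ENDIAN__)
    #define TOY_ACTIVEDARTENTRY(page) { 1, (page) }
    #else
    #define TOY_ACTIVEDARTENTRY(page) { (page), 1 }
    #endif
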
IOCopyMapper.h
80 virtual void iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page);
IOHibernateIO.cpp
43 hibernate_setup() in osfmk is called to allocate page bitmaps for all dram, and
44 page out any pages it wants to (currently zero, but probably some percentage of memory).
67 Each wired page is compressed and written and then each non-wired page. Compression and
83 - hibernate_kernel_entrypoint() removes pages occupied by the raw image from the page bitmaps.
196 static uint32_t gIOHibernateFreeRatio = 0; // free page target (percent)
334 // free page
349 uint32_t page = *pPage; local
353 while ((bitmap = hibernate_page_bitmap_pin(list, &page)))
355 count = hibernate_page_bitmap_count(bitmap, TRUE, page);
1702 IOItemCount page, count; local
2312 unsigned int page; local
[all...]
IOMapper.cpp
204 ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page) argument
207 IOMapper::gSystem->iovmInsert(addr, (IOItemCount) offset, page);
211 return page;
IOMemoryDescriptor.cpp
72 /* Copy between a physical page and a virtual address in the given vm_map */
220 vm_offset_t fPageInfo; // Pointer to page list or index into it
221 ppnum_t fMappedBase; // Page number of first page in this iopl
222 unsigned int fPageOffset; // Offset within first page of iopl
879 // Pre-compute the offset into the UPL's page list
1095 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1348 // Now add the iopl 1st page offset.
1760 unsigned int pageCount, page;
1770 for (page = 0; page < pageCount; page++)
[all...]
/macosx-10.5.8/xnu-1228.15.4/bsd/dev/dtrace/
dtrace_ptss.c
47 * page of memory, the underlying kernel _MALLOC may block.
59 // Nothing on the free list. Allocate a new page; it's okay if multiple threads race here.
60 struct dtrace_ptss_page* page = dtrace_ptss_allocate_page(p); local
62 // Make sure we actually got a page
63 if (page == NULL)
66 // Add the page to the page list
67 page->next = p->p_dtrace_ptss_pages;
68 p->p_dtrace_ptss_pages = page;
72 page
[all...]
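
The path above tolerates a benign race: when the free list is empty, each racing thread may allocate its own page, and every allocated page is pushed onto the process's page list. A sketch of that push; the struct contents beyond the next link are assumptions, and synchronization is elided here just as it is in the excerpt:

    /* Sketch: push a freshly allocated ptss page onto the process's
     * list, mirroring the excerpt's two assignment lines. */
    struct toy_ptss_page {
        struct toy_ptss_page *next;
        /* ... per-page bookkeeping elided ... */
    };

    struct toy_proc {
        struct toy_ptss_page *p_dtrace_ptss_pages;
    };

    static void
    toy_ptss_add_page(struct toy_proc *p, struct toy_ptss_page *page)
    {
        /* classic singly linked list push: the new page becomes head */
        page->next = p->p_dtrace_ptss_pages;
        p->p_dtrace_ptss_pages = page;
    }
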
/macosx-10.5.8/xnu-1228.15.4/osfmk/mach/i386/
vm_param.h
95 #define I386_PGBYTES 4096 /* bytes per 80386 page */
112 * Round off or truncate to the nearest page. These will work
113 * for either addresses or counts. (i.e. 1 byte rounds to 1 page)
195 #define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
198 vm_page_t __page = (page); \
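
With I386_PGBYTES = 4096, a power of two, rounding and truncating to a page boundary are single mask operations, and they work on byte counts as well as addresses, which is why 1 byte rounds up to a full page. An illustration under those assumptions; the macro names here are generic, not necessarily the header's own:

    #include <assert.h>
    #include <stdint.h>

    #define PGBYTES 4096u                        /* bytes per page */
    #define PGMASK  (PGBYTES - 1)                /* 0xFFF */
    #define trunc_pg(x) ((uintptr_t)(x) & ~(uintptr_t)PGMASK)
    #define round_pg(x) (((uintptr_t)(x) + PGMASK) & ~(uintptr_t)PGMASK)

    int main(void)
    {
        assert(round_pg(1)      == 4096);    /* 1 byte rounds to 1 page */
        assert(round_pg(4096)   == 4096);    /* exact multiple unchanged */
        assert(trunc_pg(0x1234) == 0x1000);  /* truncate to page start */
        return 0;
    }
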
/macosx-10.5.8/xnu-1228.15.4/osfmk/ppc/
hibernate_ppc.c
103 uint32_t page, count, PCAsize; local
109 page = atop_64(hash_table_base - PCAsize);
112 hibernate_set_page_state(page_list, page_list_wired, page, count, 0);
atomic_switch.s
115 rlwinm r6,r26,0,0,19 ; Start of page is bttd
185 rlwinm r6,r26,0,0,19 ; Start of page is bttd
AltiAssist.s
51 ; 3) No attempt is made to resolve page faults. PTE misses are handled
skiplists.s
173 subi r0,r11,4096 ; get offset last page in mapping
174 add r10,r10,r0 ; r10 <- last page in this mapping
175 cmpld r5,r10 ; does this mapping cover our page?
261 subi r0,r11,4096 ; get address of last page in submap
262 add r10,r10,r0 ; r10 <- last page in this mapping
263 cmplw r5,r10 ; does this mapping cover our page?
387 addic. r0,r11,-4096 ; get offset last page in mapping (set cr0_eq if 1 page)
395 bne-- cr0,mapSrchFull64e ; handle mapping larger than one page
421 add r4,r4,r0 ; r4 <- last page in this mapping
[all...]
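
The recurring subi/add/cmpld pattern above computes the address of a mapping's last page (the size minus 4096, added to the base) and compares the faulting page against it. The same test in C, as a sketch with illustrative names:

    #include <stdint.h>

    /* Sketch of the subi/add/cmpld pattern: does `page` (a page-aligned
     * address) fall inside a mapping of `size` bytes at `base`? size is
     * assumed page-aligned and nonzero, as for the mappings above. */
    static int
    mapping_covers_page(uint64_t base, uint64_t size, uint64_t page)
    {
        uint64_t last = base + (size - 4096);   /* address of last page */
        return page >= base && page <= last;
    }
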
hw_vm.s
104 * Maps a page or block into a pmap
215 addic r23,r9,-4096 ; Get the length to the last page
222 addc r9,r0,r23 ; Add size to get last page in new range
269 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
271 lwz r11,lgpPcfg(r11) ; Get the page config
274 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
276 addi r4,r4,1 ; Bump up the mapped page count
277 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
290 srw r9,r9,r11 ; Isolate just the page index
451 * Remove mapping via pmap, regular page, n
[all...]
/macosx-10.5.8/xnu-1228.15.4/iokit/IOKit/
IOMapper.h
42 ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page);
107 virtual void iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page) = 0;
IOHibernatePrivate.h
275 hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page);
278 hibernate_page_bittst(hibernate_page_list_t * list, uint32_t page);
281 hibernate_page_bitmap_pin(hibernate_page_list_t * list, uint32_t * page);
284 hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t page);
/macosx-10.5.8/xnu-1228.15.4/osfmk/default_pager/
default_pager_internal.h
240 unsigned int gs_pages_init; /* # page init requests */
241 unsigned int gs_pages_init_writes; /* # page init writes */
295 unsigned int bs_pages_in; /* # page read requests */
296 unsigned int bs_pages_in_fail; /* # page read errors */
297 unsigned int bs_pages_out; /* # page write requests */
298 unsigned int bs_pages_out_fail; /* # page write errors */
473 #define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page)))
474 #define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page)))
[all...]
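
VSM_SETPG and VSM_CLRPG above are plain set/clear operations on a small per-entry bitmap, one bit per page slot in the vsmap entry. A toy version; the field width used here is an assumption:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: the VSM_SETPG/VSM_CLRPG idiom on a toy vsmap entry. */
    struct toy_vsmap { uint16_t vsmap_bmap; };

    #define TOY_SETPG(vsm, page) ((vsm).vsmap_bmap |= (uint16_t)(1 << (page)))
    #define TOY_CLRPG(vsm, page) ((vsm).vsmap_bmap &= (uint16_t)~(1 << (page)))

    int main(void)
    {
        struct toy_vsmap vsm = { 0 };
        TOY_SETPG(vsm, 3);               /* mark page slot 3 in use */
        assert(vsm.vsmap_bmap == 0x8);
        TOY_CLRPG(vsm, 3);               /* and release it */
        assert(vsm.vsmap_bmap == 0);
        return 0;
    }
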
/macosx-10.5.8/AppleMacRiscPCI-3.4/
AppleDART.cpp
95 // These are with respect to the PCI address, i.e. not page number based.
205 virtual void iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page);
250 /* uint */ fPPNum :19; // ppnum_t page of translation
405 // Zero is never a valid page to return
412 // Force an extra page on every allocation
460 // ret is free list offset not page offset;
526 // Force an extra page on every allocation
608 void AppleDART::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page) argument
610 addr += offset; // Add the offset page to the base address
613 *activeDART = page | kValidEntry;
[all...]

Completed in 200 milliseconds
