Searched refs:dirty (Results 1 - 18 of 18) sorted by relevance

/macosx-10.5.8/xnu-1228.15.4/osfmk/kern/
hibernate.c
113 if (!m->dirty)
120 m->dirty = TRUE;
127 discard = (!m->dirty)
273 if (m->dirty)
293 if (m->dirty)
333 if (m->dirty)
390 if (m->dirty)
420 if (m->dirty)
435 if (m->dirty)
/macosx-10.5.8/xnu-1228.15.4/osfmk/vm/
vm_pageout.c
460 assert((m->dirty) || (m->precious) ||
495 * Since the page is left "dirty" but "not modified", we
500 m->dirty = TRUE;
502 m->dirty = FALSE;
504 if (m->dirty) {
533 /* We do not re-set m->dirty ! */
538 /* will take care of resetting dirty. We clear the */
553 * Set the dirty state according to whether or not the page was
559 * consulted if m->dirty is false.
562 m->dirty
2602 boolean_t dirty; local
[all...]
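
The comments around vm_pageout.c lines 495-504 and 553-562 describe how the pageout path detects re-dirtying during a clean: the page is marked dirty in software while the hardware modify bit is cleared, so a later pmap query reveals whether anything wrote to the page while it was being paged out. Below is a minimal standalone sketch of that idea; the page_t layout and helper names are illustrative stand-ins, not the xnu API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative page with a software dirty bit and a fake hardware modify bit. */
typedef struct {
    bool sw_dirty;     /* m->dirty in the kernel */
    bool hw_modified;  /* what pmap_is_modified() would report */
} page_t;

/* Start of pageout: remember the page is dirty in software and clear the
 * hardware modify bit, so any write during the I/O sets it again. */
static void pageout_start(page_t *m)
{
    m->sw_dirty = true;
    m->hw_modified = false;   /* pmap_clear_modify() in the real code */
}

/* End of pageout: the page stays dirty only if it was modified while the
 * write was in flight; otherwise it is now clean. */
static void pageout_finish(page_t *m)
{
    m->sw_dirty = m->hw_modified;
}

int main(void)
{
    page_t quiet = { 0 }, racy = { 0 };

    pageout_start(&quiet);
    pageout_finish(&quiet);            /* untouched during I/O -> clean */

    pageout_start(&racy);
    racy.hw_modified = true;           /* simulated write during the I/O */
    pageout_finish(&racy);             /* still dirty, will be cleaned again */

    printf("quiet dirty=%d racy dirty=%d\n", quiet.sw_dirty, racy.sw_dirty);
    return 0;
}
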
memory_object.c
125 * 1. Page is dirty and should_return is not RETURN_NONE.
129 * As a side effect, m->dirty will be made consistent
136 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
215 if (m->dirty)
267 if (m->dirty)
337 * is RETURN_DIRTY and the page is dirty, or
339 * is either dirty or precious); and,
465 * are flushed, that is dirty & precious pages are written to
574 * End of a cluster of dirty page
2083 memory_object_data_return( memory_object_t memory_object, memory_object_offset_t offset, vm_size_t size, memory_object_offset_t *resid_offset, int *io_error, boolean_t dirty, boolean_t kernel_copy, int upl_flags ) argument
[all...]
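
The expression at memory_object.c:136 is the dirtiness test used when deciding whether to return a page: a page counts as dirty if its software bit is already set, or if the pmap layer reports the hardware modified bit, in which case the software bit is refreshed as a side effect. Here is a self-contained sketch of that lazy-consistency pattern; page_t and pmap_is_modified_stub are stand-ins for illustration, not the kernel types.

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    unsigned phys_page;   /* physical page number */
    bool     dirty;       /* software dirty bit, m->dirty in the kernel */
} page_t;

/* Stand-in for pmap_is_modified(): pretend odd page numbers were written to. */
static bool pmap_is_modified_stub(unsigned phys_page)
{
    return (phys_page & 1) != 0;
}

/* Mirrors the expression at line 136: dirty if the software bit says so, or
 * if the hardware bit says so -- and the software bit is updated as a side
 * effect, keeping m->dirty consistent with the pmap state. */
static bool page_is_dirty(page_t *m)
{
    return m->dirty || (m->dirty = pmap_is_modified_stub(m->phys_page));
}

int main(void)
{
    page_t a = { .phys_page = 2, .dirty = false };  /* clean everywhere */
    page_t b = { .phys_page = 3, .dirty = false };  /* hardware-modified only */

    printf("a dirty: %d\n", page_is_dirty(&a));     /* 0 */
    printf("b dirty: %d\n", page_is_dirty(&b));     /* 1 */
    printf("b software bit now: %d\n", b.dirty);    /* 1, refreshed as a side effect */
    return 0;
}
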
vm_resident.c
420 m->dirty = FALSE;
2095 mem->dirty = FALSE;
2488 m->dirty && m->object->internal &&
2564 !m->fictitious && m->dirty && m->object->internal &&
2961 * + consider dirty pages
3120 if (m->pmapped || m->dirty)
3224 * when substituting for pmapped/dirty pages
3280 if (m1->pmapped || m1->dirty) {
3302 m2->dirty = m1->dirty;
[all...]
vm_apple_protect.c
99 boolean_t dirty,
263 * the pages provided by this EMM are not supposed to be dirty or dirtied
274 __unused boolean_t dirty,
552 * The pages are currently dirty because we've just been
268 apple_protect_pager_data_return( __unused memory_object_t mem_obj, __unused memory_object_offset_t offset, __unused vm_size_t data_cnt, __unused memory_object_offset_t *resid_offset, __unused int *io_error, __unused boolean_t dirty, __unused boolean_t kernel_copy, __unused int upl_flags) argument
vm_fault.c
735 * faults to set the dirty bit.
1631 copy_m->dirty = TRUE;
1843 copy_m->dirty = TRUE;
1851 * dirty is protected by the object lock
1853 copy_m->dirty = TRUE;
2775 m->dirty = TRUE;
4037 * Copy the page, and note that it is dirty
4066 if(!dst_page->dirty){
4068 dst_page->dirty = TRUE;
4080 if(!dst_page->dirty){
[all...]
device_vm.c
284 __unused boolean_t dirty,
278 device_pager_data_return( memory_object_t mem_obj, memory_object_offset_t offset, vm_size_t data_cnt, __unused memory_object_offset_t *resid_offset, __unused int *io_error, __unused boolean_t dirty, __unused boolean_t kernel_copy, __unused int upl_flags) argument
vm_page.h
205 dirty:1, /* Page must be cleaned (O) */ member in struct:vm_page
300 * object/offset-page mapping, and may be dirty.
vm_object.c
1213 if (!p->dirty && p->wpmapped)
1214 p->dirty = pmap_is_modified(p->phys_page);
1216 if ((p->dirty || p->precious) && !p->error && object->alive) {
1804 m->dirty = FALSE;
2180 new_page->dirty = TRUE;
5157 * skip referenced/dirty pages, pages on the active queue, etc. We're more
5266 p->dirty = TRUE;
5270 if (p->dirty || p->precious) {
6292 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
6310 if (ops & UPL_POP_DIRTY) dst_page->dirty
[all...]
bsd_vm.c
403 dst_page->dirty = TRUE;
574 __unused boolean_t dirty,
568 vnode_pager_data_return( memory_object_t mem_obj, memory_object_offset_t offset, vm_size_t data_cnt, memory_object_offset_t *resid_offset, int *io_error, __unused boolean_t dirty, __unused boolean_t kernel_copy, int upl_flags) argument
vm_map.c
9194 (p->dirty || pmap_is_modified(p->phys_page)))
10972 if (m->dirty || pmap_is_modified(m->phys_page))
11007 * - discard pages, write dirty or precious
11011 * - write dirty or precious pages back to
11204 * is writable (ie dirty pages may have already been sent back)
/macosx-10.5.8/xnu-1228.15.4/osfmk/mach/
memory_object_types.h
130 boolean_t dirty,
223 /* ... only dirty pages. */
225 /* ... dirty and precious pages. */
394 dirty:1, /* Page must be cleaned (O) */ member in struct:upl_page_info
522 * with a dirty page that hasn't yet been seen by the FS
602 (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)
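
The macro fragment at memory_object_types.h:602 shows how the upl_page_info dirty bit is read: the lookup is guarded by a phys_addr check so that an absent page is simply reported as not dirty. A small sketch of that access pattern over an illustrative page-info array follows; the struct layout is a simplification, not the real upl_page_info.

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for upl_page_info: a physical address of 0 means
 * "no page present at this index". */
typedef struct {
    unsigned long phys_addr;
    unsigned      dirty : 1;   /* "Page must be cleaned" */
} page_info_t;

/* Equivalent of the expression at line 602 -- only consult the dirty bit
 * when a page is actually present in the list. */
static bool upl_page_dirty(const page_info_t *upl, size_t index)
{
    return (upl[index].phys_addr != 0) ? (upl[index].dirty != 0) : false;
}

int main(void)
{
    page_info_t upl[2] = {
        { .phys_addr = 0x1000, .dirty = 1 },
        { .phys_addr = 0,      .dirty = 1 },  /* absent: dirty bit ignored */
    };
    return (upl_page_dirty(upl, 0) && !upl_page_dirty(upl, 1)) ? 0 : 1;
}
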
/macosx-10.5.8/xnu-1228.15.4/bsd/hfs/hfscommon/Misc/
VolumeAllocation.c
121 Boolean dirty);
377 // Update the volume's free block count, and mark the VCB as dirty.
548 ; dirty
554 Boolean dirty)
559 if (dirty)
565 if (dirty) {
690 Boolean dirty = false; local
791 dirty = true;
876 (void) ReleaseBitmapBlock(vcb, blockRef, dirty);
551 ReleaseBitmapBlock( ExtendedVCB *vcb, u_int32_t blockRef, Boolean dirty) argument
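ReleaseBitmapBlock takes a dirty flag that decides how the cached allocation-bitmap block is relinquished: a block whose bits were changed is pushed back toward disk, an untouched one is simply released. A toy sketch of that calling convention is below, using hypothetical write_back/release helpers in place of the real buffer-cache and journal calls.

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    const char *name;
} block_ref_t;

/* Hypothetical stand-ins for the buffer-cache write-back and release paths. */
static void write_back(block_ref_t *b) { printf("write back %s\n", b->name); }
static void release(block_ref_t *b)    { printf("release %s\n",    b->name); }

/* Same shape as ReleaseBitmapBlock(vcb, blockRef, dirty): the caller tracks
 * whether it modified the block and passes that as the dirty flag. */
static void release_bitmap_block(block_ref_t *blockRef, bool dirty)
{
    if (dirty)
        write_back(blockRef);
    else
        release(blockRef);
}

int main(void)
{
    block_ref_t bitmap = { "allocation bitmap block" };
    bool dirty = false;

    /* ... allocate some blocks, flipping bits in the bitmap ... */
    dirty = true;

    release_bitmap_block(&bitmap, dirty);
    return 0;
}
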
/macosx-10.5.8/xnu-1228.15.4/bsd/nfs/
nfs_bio.c
336 * Called by vnode_pager() on pageout request of non-dirty page.
361 * If there's a dirty range in the buffer, check to
362 * see if this page intersects with the dirty range.
421 * update buffer's valid/dirty info from UBC
446 /* anything beyond the end of the file is not valid or dirty */
993 * non-needcommit nocache buffer AND no pages are dirty.
1205 /* move to dirty list */
2049 * Flush the buffer if it's dirty.
2097 /* there are also dirty page(s) (or range) in the read range, */
2117 /* buffer's not dirty, s
2695 uint32_t dirty = bp->nb_dirty; local
3064 int error = 0, retv, wcred_set, flags, dirty; local
[all...]
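
The comments around nfs_bio.c lines 361-362 describe the pageout check for a non-dirty page: if the NFS buffer covering the page has a dirty byte range, the code tests whether the page overlaps that range before deciding it has nothing to write. A sketch of that interval-overlap test, using illustrative offsets rather than the nfsbuf fields:

#include <stdbool.h>
#include <stdio.h>

/* Does the page [page_off, page_off + page_size) intersect the buffer's
 * dirty byte range [dirty_off, dirty_end)?  Mirrors the intersection test
 * described in the nfs_bio.c pageout comments; names are illustrative. */
static bool page_hits_dirty_range(unsigned page_off, unsigned page_size,
                                  unsigned dirty_off, unsigned dirty_end)
{
    if (dirty_off >= dirty_end)
        return false;                       /* no dirty range in the buffer */
    return page_off < dirty_end && (page_off + page_size) > dirty_off;
}

int main(void)
{
    /* A 4 KB page at offset 8192 against a dirty range of bytes 9000-9100. */
    printf("%d\n", page_hits_dirty_range(8192, 4096, 9000, 9100));  /* 1 */
    printf("%d\n", page_hits_dirty_range(4096, 4096, 9000, 9100));  /* 0 */
    return 0;
}
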
/macosx-10.5.8/xnu-1228.15.4/osfmk/default_pager/
dp_memory_object.c
715 __unused boolean_t dirty,
709 dp_memory_object_data_return( memory_object_t mem_obj, memory_object_offset_t offset, vm_size_t size, __unused memory_object_offset_t *resid_offset, __unused int *io_error, __unused boolean_t dirty, __unused boolean_t kernel_copy, __unused int upl_flags) argument
/macosx-10.5.8/xnu-1228.15.4/bsd/vfs/
vfs_cluster.c
2547 * written as dirty
2768 * UPL has been automatically set to clear the dirty flags (both software and hardware)
2804 * cluster method of delaying dirty pages
4761 * - only pages that are currently dirty are returned... these are the ones we need to clean
4762 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
4763 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
4764 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
4787 * since we only asked for the dirty page
5665 vfs_drt_do_mark_pages( void **private, u_int64_t offset, u_int length, u_int *setcountp, int dirty) argument
[all...]
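
The comments at vfs_cluster.c lines 4761-4764 spell out the dirty-bit handshake used when dirty pages are gathered into a UPL for writing: the hardware bit is cleared and the software bit set at gather time, a successful commit clears the software bit, and an abort leaves it set so the page is still treated as dirty. Below is a minimal state sketch of that protocol, assuming a two-bit model rather than the real UPL machinery.

#include <stdbool.h>

typedef struct {
    bool hw_dirty;   /* hardware (pmap) modified bit */
    bool sw_dirty;   /* software dirty bit kept by the VM layer */
} upl_page_t;

/* Gather a dirty page into the UPL: clear the hardware bit, record the
 * dirtiness in the software bit (see line 4762). */
static void upl_gather(upl_page_t *p)
{
    if (p->hw_dirty) {
        p->hw_dirty = false;
        p->sw_dirty = true;
    }
}

/* Successful commit after the write: clear the software bit; the hardware
 * bit is left alone so a write during the I/O is still noticed (4764). */
static void upl_commit(upl_page_t *p)
{
    p->sw_dirty = false;
}

/* Aborted I/O: leave sw_dirty set, since the page was never cleaned (4763). */
static void upl_abort(upl_page_t *p)
{
    (void)p;
}

int main(void)
{
    upl_page_t ok = { .hw_dirty = true }, failed = { .hw_dirty = true };

    upl_gather(&ok);     upl_commit(&ok);       /* ok.sw_dirty == false */
    upl_gather(&failed); upl_abort(&failed);    /* failed.sw_dirty == true */
    return (ok.sw_dirty || !failed.sw_dirty) ? 1 : 0;
}
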
/macosx-10.5.8/xnu-1228.15.4/bsd/dev/dtrace/
dtrace.c
1116 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1125 dtrace_dynvar_t *dirty;
1135 * If the dirty list is NULL, there is no dirty work to do.
1152 * Atomically move the dirty list aside.
1155 dirty = dcpu->dtdsc_dirty;
1158 * Before we zap the dirty list, set the rinsing list.
1161 * on a hash chain, either the dirty list or the
1164 dcpu->dtdsc_rinsing = dirty;
1167 dirty, NUL
[all...]
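
The dtrace.c snippets describe the cleaner detaching a per-CPU dirty dynamic-variable list in one atomic step: the current dirty head is first published as the rinsing list, then a compare-and-swap swings the dirty head to NULL, and a failed CAS (a producer pushed in the meantime) means the cleaner retries. A sketch of that list-steal pattern with C11 atomics follows; the dcpu layout and names are illustrative, not the DTrace structures.

#include <stdatomic.h>
#include <stddef.h>

typedef struct dynvar {
    struct dynvar *next;
} dynvar_t;

typedef struct {
    _Atomic(dynvar_t *) dirty;    /* producers push freed variables here */
    dynvar_t           *rinsing;  /* cleaner parks the stolen list here */
} dcpu_t;

/* Detach the entire dirty list atomically, in the style of the dtrace.c
 * cleaner: set rinsing before zapping dirty, then CAS dirty to NULL. */
static void steal_dirty_list(dcpu_t *dcpu)
{
    for (;;) {
        dynvar_t *dirty = atomic_load(&dcpu->dirty);

        if (dirty == NULL)
            return;               /* no dirty work to do */

        dcpu->rinsing = dirty;    /* publish before the swap */

        if (atomic_compare_exchange_strong(&dcpu->dirty, &dirty,
                                           (dynvar_t *)NULL))
            return;               /* whole list now hangs off rinsing */
        /* a producer raced in; retry with the new head */
    }
}

int main(void)
{
    dynvar_t a = { NULL };
    dynvar_t b = { &a };          /* dirty list: b -> a */
    dcpu_t cpu;

    cpu.rinsing = NULL;
    atomic_init(&cpu.dirty, &b);

    steal_dirty_list(&cpu);
    return (atomic_load(&cpu.dirty) == NULL && cpu.rinsing == &b) ? 0 : 1;
}
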
/macosx-10.5.8/xnu-1228.15.4/iokit/Kernel/
IOMemoryDescriptor.cpp
1778 page_list[page].dirty = 0;
1822 // Pages do not need to be marked as dirty on commit

Completed in 179 milliseconds