Searched refs:dirty (Results 1 - 19 of 19) sorted by relevance

/barrelfish-2018-10-04/usr/tests/memtest/
mem_alloc.c
71 bool dirty = false; local
76 dirty = true;
80 if(dirty) {
81 printf("Memory dirty\n");
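
In memtest, dirty is a plain fault flag: the test walks memory and latches dirty as soon as it sees a byte that should not be set. A minimal self-contained sketch of that pattern (the buffer, its size, and the zero-fill are stand-ins, not the actual mem_alloc.c code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t len = 4096;
        unsigned char *buf = malloc(len);
        if (buf == NULL)
            return 1;
        memset(buf, 0, len);          /* stand-in: memtest scans freshly mapped frames */

        bool dirty = false;           /* same latch pattern as mem_alloc.c */
        for (size_t i = 0; i < len; i++) {
            if (buf[i] != 0) {        /* any unexpected nonzero byte taints the run */
                dirty = true;
                break;
            }
        }
        if (dirty) {
            printf("Memory dirty\n");
        }
        free(buf);
        return 0;
    }
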
/barrelfish-2018-10-04/lib/libc/nameser/
ns_ttl.c
101 int ch, digits, dirty; local
106 dirty = 0;
131 dirty = 1;
134 if (dirty)
138 } else if (!dirty)
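
In ns_ttl.c, dirty records that at least one digit/unit group of a symbolic TTL such as 1W2D3H has been consumed, which changes how the tail of the input is treated (lines 134 and 138). A hedged sketch of that parsing idea (parse_ttl and its unit multipliers are illustrative, not the libc ns_parse_ttl logic):

    #include <ctype.h>
    #include <stdio.h>

    static long parse_ttl(const char *src)
    {
        long ttl = 0, tmp = 0;
        int ch, dirty = 0;

        while ((ch = (unsigned char)*src++) != '\0') {
            if (isdigit(ch)) {
                tmp = tmp * 10 + (ch - '0');
                continue;
            }
            switch (toupper(ch)) {
            case 'W': ttl += tmp * 604800; break;
            case 'D': ttl += tmp * 86400;  break;
            case 'H': ttl += tmp * 3600;   break;
            case 'M': ttl += tmp * 60;     break;
            case 'S': ttl += tmp;          break;
            default:  return -1;            /* unknown unit letter */
            }
            tmp = 0;
            dirty = 1;                      /* at least one unit was applied */
        }
        return dirty ? ttl + tmp : tmp;     /* bare/trailing digits count as seconds */
    }

    int main(void)
    {
        printf("%ld\n", parse_ttl("1W2D"));  /* 777600 */
        printf("%ld\n", parse_ttl("300"));   /* 300 */
        return 0;
    }
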
/barrelfish-2018-10-04/lib/libc/inet/
inet_net_pton.c
67 int n, ch, tmp = 0, dirty, bits; local
77 dirty = 0;
84 if (dirty == 0)
88 if (++dirty == 2) {
92 dirty = 0;
95 if (dirty) { /*%< Odd trailing nybble? */
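
Here dirty counts hex nybbles: every second digit flushes an accumulated byte, and the check at line 95 catches an odd trailing nybble, which is emitted left-aligned. A sketch of the same accumulation trick (hex_to_bytes is a made-up name; inet_net_pton applies this to network-prefix strings):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int hex_to_bytes(const char *src, unsigned char *dst, size_t size)
    {
        static const char hexdigits[] = "0123456789abcdef";
        const char *p;
        int ch, tmp = 0, dirty = 0;
        size_t n = 0;

        while ((ch = (unsigned char)*src++) != '\0') {
            if ((p = strchr(hexdigits, tolower(ch))) == NULL)
                return -1;                  /* not a hex digit */
            if (dirty == 0)
                tmp = 0;
            tmp = (tmp << 4) | (int)(p - hexdigits);
            if (++dirty == 2) {             /* two nybbles make a byte */
                if (n >= size)
                    return -1;
                dst[n++] = (unsigned char)tmp;
                dirty = 0;
            }
        }
        if (dirty) {                        /* odd trailing nybble? */
            if (n >= size)
                return -1;
            dst[n++] = (unsigned char)(tmp << 4);
        }
        return (int)n;
    }

    int main(void)
    {
        unsigned char buf[8];
        int n = hex_to_bytes("abc", buf, sizeof(buf));
        for (int i = 0; i < n; i++)
            printf("%02x ", buf[i]);        /* ab c0 */
        printf("\n");
        return 0;
    }
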
/barrelfish-2018-10-04/kernel/include/target/x86_32/
paging_kernel_target.h
113 uint64_t dirty :1; member in struct:x86_32_ptable_entry::__anon457
129 uint64_t dirty :1; member in struct:x86_32_ptable_entry::__anon458
171 uint32_t dirty :1; member in struct:x86_32_ptable_entry::__anon460
186 uint32_t dirty :1; member in struct:x86_32_ptable_entry::__anon461
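
In the x86_32 page-table definitions, dirty is a one-bit bitfield inside each table-entry struct; the MMU sets it on the first write to the mapped frame. A simplified sketch of the layout and a check of the bit (the union and field names are illustrative, not the Barrelfish structs, and bitfield ordering is compiler-defined):

    #include <stdint.h>
    #include <stdio.h>

    union ptable_entry {
        uint32_t raw;
        struct {
            uint32_t present    :1;
            uint32_t read_write :1;
            uint32_t user       :1;
            uint32_t write_thru :1;
            uint32_t cache_dis  :1;
            uint32_t accessed   :1;
            uint32_t dirty      :1;   /* set by the MMU on the first write */
            uint32_t rest       :25;
        } bits;
    };

    int main(void)
    {
        union ptable_entry pte = { .raw = 0x63 };  /* present|rw|accessed|dirty */
        if (pte.bits.dirty)
            printf("frame was written; flush before unmap or reuse\n");
        return 0;
    }
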
/barrelfish-2018-10-04/include/vm/
vm_page.c
1095 * Set all bits in the page's dirty field.
1113 m->dirty = VM_PAGE_BITS_ALL;
1224 * Since we are inserting a new and possibly dirty page,
1399 * Note: we *always* dirty the page. It is necessary both for the
1715 KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
2022 KASSERT(m->dirty == 0,
2023 ("vm_page_alloc_init: page %p is dirty", m));
2525 m_new->dirty = m->dirty;
[all...]
vm_page.h
101 * dirty field is machine dependent (M). In the
109 * contains the dirty field. In the machine-independent layer,
160 vm_page_bits_t dirty; /* map of dirty DEV_BSIZE chunks (M) */ member in struct:vm_page
331 #define PG_WINATCFLS 0x0040 /* flush dirty page on inactive q */
637 * Set all bits in the page's dirty field.
652 m->dirty = VM_PAGE_BITS_ALL;
675 * Set page to not be dirty. Note: does not clear pmap modify bits
682 m->dirty = 0;
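
In vm_page.h, dirty is not a boolean but a bitmap: one bit per DEV_BSIZE chunk of the page (line 160), so partial writes are tracked at disk-block granularity, and VM_PAGE_BITS_ALL (line 652) marks every chunk at once. A hedged sketch of that bookkeeping, assuming the common 4 KiB page / 512-byte block configuration (range_bits is a made-up helper, not FreeBSD's vm_page_bits):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        4096
    #define DEV_BSIZE        512
    #define VM_PAGE_BITS_ALL 0xffu    /* 4096/512 = 8 chunks -> 8 bits */

    typedef uint8_t vm_page_bits_t;

    /* Bits covering the byte range [base, base+size) within one page. */
    static vm_page_bits_t range_bits(int base, int size)
    {
        int first = base / DEV_BSIZE;
        int last  = (base + size - 1) / DEV_BSIZE;
        vm_page_bits_t bits = 0;
        for (int i = first; i <= last; i++)
            bits |= (vm_page_bits_t)(1u << i);
        return bits;
    }

    int main(void)
    {
        /* A 100-byte write at offset 1000 touches chunks 1 and 2. */
        printf("0x%02x\n", range_bits(1000, 100));  /* 0x06 */
        printf("0x%02x\n", VM_PAGE_BITS_ALL);       /* whole page dirty */
        return 0;
    }
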
phys_pager.c
156 KASSERT(m[i]->dirty == 0,
157 ("phys_pager_getpages: dirty page %p", m[i]));
vnode_pager.c
456 * dirty bit for a partially zeroed block is not
462 * Clear out partial-page dirty bits.
596 KASSERT((m->dirty & bits) == 0,
597 ("vnode_pager_input_smlfs: page %p is dirty", m));
667 KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
842 KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
1076 KASSERT(mt->dirty == 0,
1077 ("%s: page %p is dirty", __func_
[all...]
vm_pageout.c
197 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
219 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
222 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
411 if (p->dirty == 0) {
439 if (p->dirty == 0)
494 * We do not have to fixup the clean/dirty bits here... we can
497 * NOTE! mc[i]->dirty may be partial or fragmented due to an
801 m->pindex != pindex || m->dirty == 0) {
819 * If a page is dirty, then it is either being washed
843 * vm_pageout_scan does the dirty work
[all...]
vm_pager.c
260 * not dirty and belong to the proper object.
268 KASSERT(m[i]->dirty == 0,
269 ("%s: page %p is dirty", __func__, m[i]));
vm_fault.c
202 * possible for the no-NOSYNC thread to see m->dirty
212 * if the page is already dirty to prevent data written with
219 if (m->dirty == 0) {
228 * written NOW so dirty it explicitly to save on
232 * any swap backing since the page is now dirty.
362 * dirty pages. Under these conditions, a read lock on the top-level
828 * dirty in the first object so that it will go out
1250 (*mp)->dirty != VM_PAGE_BITS_ALL) {
1252 * Explicitly dirty the physical page. Otherwise, the
1435 dst_m->dirty
[all...]
vm_mmap.c
891 if (m->dirty == 0 && pmap_is_modified(m))
893 if (m->dirty != 0)
vm_object.c
813 * this is a nosync mmap then the object is likely to stay dirty so do not
831 return (p->dirty != 0);
838 * Clean all dirty pages in the specified range of object. Leaves page
841 * leaving the object dirty.
1374 /* vm_page_rename() will handle dirty and cache. */
1621 * through the rename. vm_page_rename() will handle dirty and
1865 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1939 if (p->dirty)
swap_pager.c
52 * does not try to keep previously allocated swap blocks for dirty
58 * blocks only exist for dirty vm_page_t's now and these are already
1050 * This routine is typically called when a page is made dirty, at
1055 * should make the page dirty before calling this routine. This routine
1056 * does NOT change the m->dirty status of the page. Also: MADV_FREE
1229 * and mark a page dirty here because the caller is likely to mark
1325 * The page is left dirty until the pageout operation completes
1392 * Must set dirty range for NFS to work.
1511 * NOTE: for reads, m->dirty will probably
1530 * NOTE: for reads, m->dirty will
[all...]
/barrelfish-2018-10-04/kernel/include/target/x86_64/
paging_kernel_target.h
106 uint64_t dirty :1; member in struct:x86_64_ptable_entry::__anon463
125 uint64_t dirty :1; member in struct:x86_64_ptable_entry::__anon464
144 uint64_t dirty :1; member in struct:x86_64_ptable_entry::__anon465
/barrelfish-2018-10-04/tools/elver/
elver.c
87 uint64_t dirty :1; member in struct:ptable_entry::__anon1422
105 uint64_t dirty :1; member in struct:ptable_entry::__anon1423
/barrelfish-2018-10-04/lib/lua/src/
llex.c
420 luaZ_resetbuffer(ls->buff); /* `skip_sep' may dirty the buffer */
423 luaZ_resetbuffer(ls->buff); /* previous call may dirty the buff. */
/barrelfish-2018-10-04/lib/tommath/
makefile
177 echo Scanning for scratch/dirty files
/barrelfish-2018-10-04/lib/openssl-1.0.0d/engines/
makeengines.com
227 $! Do the dirty work.

Completed in 375 milliseconds