Lines matching refs:FALSE (each entry is the source line number followed by the matching line):

376 static boolean_t vm_pageout_need_to_refill_clean_queue = FALSE;
377 static boolean_t vm_pageout_precleaning_delayed = FALSE;
398 boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
491 p->pageout = FALSE;
527 m->cleaning = FALSE;
528 m->encrypted_cleaning = FALSE;
529 m->pageout = FALSE;
544 SET_PAGE_DIRTY(m, FALSE);
546 m->dirty = FALSE;
575 * the (COPY_OUT_FROM == FALSE) request_page_list case
589 m->busy = FALSE;
590 m->absent = FALSE;
593 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
600 m->overwriting = FALSE;
618 m->dirty = FALSE;
622 m->encrypted_cleaning = FALSE;
623 m->busy = FALSE;
625 m->cleaning = FALSE;
680 SET_PAGE_DIRTY(m, FALSE);
681 m->precious = FALSE;
689 new_m->fictitious = FALSE;
700 new_m->busy = FALSE;
773 SET_PAGE_DIRTY(m, FALSE);
870 q->pgo_idle = FALSE;
911 m->pageout_queue = FALSE;
920 m->laundry = FALSE;
924 q->pgo_throttled = FALSE;
928 q->pgo_draining = FALSE;
938 vm_pageout_precleaning_delayed = FALSE;
1024 * wait_for_pressure FALSE, so that code path must remain safe in the
1131 FALSE); \
1174 boolean_t inactive_throttled = FALSE;
1184 boolean_t grab_anonymous = FALSE;
1262 try_failed = FALSE;
1426 (vm_page_cleaned_count >= VM_PAGE_CLEANED_TARGET || vm_pageout_need_to_refill_clean_queue == FALSE) &&
1432 vm_pageout_need_to_refill_clean_queue = FALSE;
1480 boolean_t can_steal = FALSE;
1572 exceeded_burst_throttle = FALSE;
1605 grab_anonymous = FALSE;
1751 vm_pageout_adjust_io_throttles(iq, eq, FALSE);
1753 if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
1790 iq->pgo_throttled = FALSE;
1809 boolean_t pageout_making_free = ((vm_page_free_count + local_freed) < vm_page_free_target); /* TRUE if making free, FALSE if making clean */
1850 if (grab_anonymous == FALSE || queue_empty(&vm_page_queue_anonymous)) {
1867 grab_anonymous = FALSE;
1901 if (pageout_making_free == FALSE) {
1976 m->reference = FALSE;
1994 else if (grab_anonymous == FALSE || queue_empty(&vm_page_queue_anonymous))
2020 try_failed = FALSE;
2062 vm_page_speculate(m, FALSE);
2068 VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
2153 SET_PAGE_DIRTY(m, FALSE);
2206 if (m->reference == FALSE && m->pmapped == TRUE) {
2212 SET_PAGE_DIRTY(m, FALSE);
2320 SET_PAGE_DIRTY(m, FALSE);
2325 forced_reclaim = FALSE;
2338 * FALSE (or possibly "reactivate_limit" was
2349 inactive_throttled = FALSE;
2384 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && object->internal == FALSE) {
2403 vm_pageout_adjust_io_throttles(iq, eq, FALSE);
2449 * Note that if 'pmapped' is FALSE then the page is not
2458 SET_PAGE_DIRTY(m, FALSE);
2524 m->reference = FALSE;
2583 vm_pageout_cluster(m, FALSE);
2725 m->pageout_queue = FALSE;
2804 m->pageout = FALSE;
2884 FALSE,
2885 FALSE,
2896 q->pgo_busy = FALSE;
2912 boolean_t set_iq = FALSE;
2913 boolean_t set_eq = FALSE;
2916 req_lowpriority = FALSE;
3006 static boolean_t set_up_thread = FALSE;
3041 boolean_t buf_large_zfree = FALSE;
3059 first_try = FALSE;
3149 vm_pageout_queue_external.pgo_idle = FALSE;
3150 vm_pageout_queue_external.pgo_busy = FALSE;
3151 vm_pageout_queue_external.pgo_throttled = FALSE;
3152 vm_pageout_queue_external.pgo_draining = FALSE;
3153 vm_pageout_queue_external.pgo_lowpriority = FALSE;
3155 vm_pageout_queue_external.pgo_inited = FALSE;
3161 vm_pageout_queue_internal.pgo_idle = FALSE;
3162 vm_pageout_queue_internal.pgo_busy = FALSE;
3163 vm_pageout_queue_internal.pgo_throttled = FALSE;
3164 vm_pageout_queue_internal.pgo_draining = FALSE;
3165 vm_pageout_queue_internal.pgo_lowpriority = FALSE;
3167 vm_pageout_queue_internal.pgo_inited = FALSE;
3435 boolean_t encountered_lrp = FALSE;
3502 user_page_list[0].device = FALSE;
3514 upl->map_object->can_persist = FALSE;
3565 FALSE, /* should_return */
3782 dst_page->precious = FALSE;
3793 alias_page->absent = FALSE;
3804 SET_PAGE_DIRTY(dst_page, FALSE);
3806 dst_page->dirty = FALSE;
3864 FALSE, /* should_return */
3905 dst_page->pageout = FALSE;
3907 vm_pageout_steal_laundry(dst_page, FALSE);
3985 dst_page->busy = FALSE;
4013 dst_page->encrypted = FALSE;
4047 dst_page->precious = FALSE;
4058 alias_page->absent = FALSE;
4096 dst_page->restart = FALSE;
4105 SET_PAGE_DIRTY(dst_page, FALSE);
4106 dst_page->precious = FALSE;
4111 dst_page->precious = FALSE;
4125 user_page_list[entry].device = FALSE;
4126 user_page_list[entry].needed = FALSE;
4130 user_page_list[entry].speculative = FALSE;
4491 *upl_size, FALSE,
4495 sync_cow_data = FALSE;
4513 (vm_object_size_t)*upl_size, FALSE,
4517 force_data_sync = FALSE;
4603 kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
4659 upl->map_object->can_persist = FALSE;
4689 alias_page->fictitious = FALSE;
4725 alias_page->busy = FALSE;
4726 alias_page->absent = FALSE;
4749 VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
4759 VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
4928 boolean_t should_be_throttled = FALSE;
4930 *empty = FALSE;
5015 shadow_object->blocked_access = FALSE;
5066 t->pageout = FALSE;
5092 SET_PAGE_DIRTY(m, FALSE);
5094 m->dirty = FALSE;
5105 m->cs_validated = FALSE;
5129 m->absent = FALSE;
5151 m->dirty = FALSE;
5159 m->pageout = FALSE;
5170 m->cs_validated = FALSE;
5178 * the (COPY_OUT_FROM == FALSE) request_page_list case
5181 m->absent = FALSE;
5186 * alternate (COPY_OUT_FROM == FALSE) page_list case
5194 m->overwriting = FALSE;
5197 m->encrypted_cleaning = FALSE;
5201 m->cleaning = FALSE;
5211 m->pageout = FALSE;
5223 SET_PAGE_DIRTY(m, FALSE);
5242 m->dirty = FALSE;
5265 m->precious = FALSE;
5268 SET_PAGE_DIRTY(m, FALSE);
5270 m->dirty = FALSE;
5274 if (hibernate_cleaning_in_progress == FALSE && !m->dirty && (upl->flags & UPL_PAGEOUT)) {
5289 SET_PAGE_DIRTY(m, FALSE);
5341 m->busy = FALSE;
5456 *empty = FALSE;
5488 *empty = FALSE;
5542 shadow_object->blocked_access = FALSE;
5561 needed = FALSE;
5581 t->pageout = FALSE;
5598 * COPYOUT = FALSE case
5604 m->absent = FALSE;
5606 must_free = FALSE;
5608 m->restart = FALSE;
5610 must_free = FALSE;
5612 m->restart = FALSE;
5613 m->absent = FALSE;
5616 must_free = FALSE;
5618 if (m->clustered && needed == FALSE) {
5640 m->cleaning = FALSE;
5641 m->encrypted_cleaning = FALSE;
5657 m->overwriting = FALSE;
5695 m->overwriting = FALSE;
5698 m->encrypted_cleaning = FALSE;
5702 m->pageout = FALSE;
5703 m->cleaning = FALSE;
5746 m->busy = FALSE;
5891 int no_zero_fill = FALSE;
5910 if (vm_lopage_needed == FALSE)
5982 user_page_list[0].device = FALSE;
6087 FALSE, /* should_return */
6106 fault_info.no_cache = FALSE;
6107 fault_info.stealth = FALSE;
6108 fault_info.io_sync = FALSE;
6109 fault_info.cs_bypass = FALSE;
6157 prot | VM_PROT_WRITE, FALSE,
6161 FALSE, &fault_info);
6269 dst_page->pageout = FALSE;
6271 vm_pageout_steal_laundry(dst_page, FALSE);
6318 SET_PAGE_DIRTY(low_page, FALSE);
6330 dst_page->busy = FALSE;
6371 user_page_list[entry].device = FALSE;
6372 user_page_list[entry].needed = FALSE;
6376 user_page_list[entry].speculative = FALSE;
6460 need_unwire = FALSE;
6471 need_unwire = FALSE;
6509 upls_locked = FALSE;
6569 upls_locked = FALSE;
6642 boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
6735 if (vm_paging_page_inuse[i] == FALSE) {
6832 FALSE,
6940 vm_paging_page_inuse[i] = FALSE;
6956 boolean_t swap_crypt_ctx_initialized = FALSE;
6962 boolean_t swap_crypt_ctx_tested = FALSE;
6983 if (swap_crypt_ctx_initialized == FALSE) {
7003 if (swap_crypt_ctx_tested == FALSE) {
7118 FALSE);
7129 if (swap_crypt_ctx_initialized == FALSE) {
7246 FALSE);
7311 page->dirty = FALSE;
7312 assert (page->cs_validated == FALSE);
7315 page->encrypted = FALSE;
7328 * that page. That code relies on "pmapped" being FALSE, so that the
7332 page->pmapped = FALSE;
7333 page->wpmapped = FALSE;
7574 return FALSE;
7578 return FALSE;
7611 return FALSE;
7623 return FALSE;
7868 boolean_t result = FALSE;
7926 FALSE);
7956 page->dirty = FALSE;
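
Most of the matches above follow the same Mach-style pattern: boolean_t fields on a page or on a pageout queue (busy, cleaning, pageout, dirty, pgo_throttled, ...) are reset to FALSE once an operation finishes, or FALSE is passed as a flag argument (e.g. SET_PAGE_DIRTY(m, FALSE)). A minimal, self-contained sketch of that pattern, using hypothetical names (fake_page, fake_pageout_complete) rather than the real xnu structures:

    /* Hedged illustration only; boolean_t/TRUE/FALSE mirror <mach/boolean.h>. */
    #include <stdio.h>

    typedef int boolean_t;              /* int or unsigned int, per architecture */
    #define TRUE  1
    #define FALSE 0

    struct fake_page {                  /* stand-in for vm_page; field names mirror the listing */
            boolean_t busy;
            boolean_t cleaning;
            boolean_t pageout;
            boolean_t dirty;
    };

    /* Clear transient state once a (hypothetical) pageout completes. */
    static void fake_pageout_complete(struct fake_page *m)
    {
            m->cleaning = FALSE;
            m->pageout  = FALSE;
            m->dirty    = FALSE;
            m->busy     = FALSE;        /* cleared last; waking any waiters would normally follow */
    }

    int main(void)
    {
            struct fake_page m = { TRUE, TRUE, TRUE, TRUE };

            fake_pageout_complete(&m);
            printf("busy=%d cleaning=%d pageout=%d dirty=%d\n",
                   m.busy, m.cleaning, m.pageout, m.dirty);
            return 0;
    }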