Lines Matching refs:base

2610 vm_page_bits(int base, int size)
2616 base + size <= PAGE_SIZE,
2617 ("vm_page_bits: illegal base/size %d/%d", base, size)
2623 first_bit = base >> DEV_BSHIFT;
2624 last_bit = (base + size - 1) >> DEV_BSHIFT;
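
The first_bit/last_bit computation above (2623-2624) maps a byte range [base, base + size) within a page to one bit per DEV_BSIZE block. A minimal userland sketch of that math follows, assuming a 4 KB page, 512-byte device blocks, and an 8-bit mask type; the kernel's actual vm_page_bits_t width and the final return expression are assumptions here, and page_bits() is a stand-in name, not the kernel function.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	4096		/* assumed page size */
#define	DEV_BSIZE	512		/* assumed block size */
#define	DEV_BSHIFT	9		/* log2(DEV_BSIZE) */

typedef uint8_t vm_page_bits_t;		/* 4096 / 512 == 8 blocks per page */

static vm_page_bits_t
page_bits(int base, int size)
{
	int first_bit, last_bit;

	assert(base + size <= PAGE_SIZE);
	if (size == 0)			/* degenerate case: no blocks touched */
		return (0);
	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;
	/* Set bits first_bit..last_bit, inclusive. */
	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}

int
main(void)
{
	/* Bytes 512..2047 touch blocks 1..3, so the mask is 0x0e. */
	printf("0x%02x\n", (unsigned)page_bits(512, 1536));
	return (0);
}
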
2638 * (base + size) must be less than or equal to PAGE_SIZE.
2641 vm_page_set_valid(vm_page_t m, int base, int size)
2650 * If the base is not DEV_BSIZE aligned and the valid
2654 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2655 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2656 pmap_zero_page_area(m, frag, base - frag);
2663 endoff = base + size;
2673 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2679 m->valid |= vm_page_bits(base, size);
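
The fragments at 2650-2663 show why vm_page_set_valid() cares about alignment: valid bits have DEV_BSIZE granularity, so marking a partially written block valid would also expose whatever stale bytes precede base in that block. A userland sketch of just that head-fragment decision; zero_area() is a hypothetical stand-in for pmap_zero_page_area(), and the assumption here is that the tail at endoff = base + size gets symmetric treatment.

#include <stdint.h>
#include <stdio.h>

#define	DEV_BSIZE	512
#define	DEV_BSHIFT	9

typedef uint8_t vm_page_bits_t;

/* Stand-in for pmap_zero_page_area(m, off, len). */
static void
zero_area(int off, int len)
{
	printf("zero %d bytes at offset %d\n", len, off);
}

/*
 * Head-fragment handling only: if base is not DEV_BSIZE aligned and the
 * block containing it is not already valid, zero from the block boundary
 * up to base so stale bytes are not exposed once the block is marked valid.
 */
static void
set_valid_head(vm_page_bits_t valid, int base)
{
	int frag;

	frag = base & ~(DEV_BSIZE - 1);
	if (frag != base &&
	    (valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		zero_area(frag, base - frag);
}

int
main(void)
{
	set_valid_head(0x00, 700);	/* block 1 invalid: zeroes 512..699 */
	set_valid_head(0x02, 700);	/* block 1 already valid: no zeroing */
	return (0);
}
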
2744 * (base + size) must be less than or equal to PAGE_SIZE.
2747 vm_page_set_validclean(vm_page_t m, int base, int size)
2757 * If the base is not DEV_BSIZE aligned and the valid
2761 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2762 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2763 pmap_zero_page_area(m, frag, base - frag);
2770 endoff = base + size;
2788 pagebits = vm_page_bits(base, size);
2791 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2793 base += frag;
2798 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2800 if (base == 0 && size == PAGE_SIZE) {
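
Two things stand out in the vm_page_set_validclean() fragments: partial DEV_BSIZE blocks at the edges are trimmed by the frag arithmetic around 2791-2798 before the mask is recomputed, and a full-page call (base == 0 && size == PAGE_SIZE at 2800) is special-cased. One reading of the trimming is that only blocks lying entirely inside [base, base + size) should have their dirty state cleared; the helper below is a hypothetical illustration of that intent, not the kernel's arithmetic.

#include <stdint.h>

#define	DEV_BSIZE	512
#define	DEV_BSHIFT	9

typedef uint8_t vm_page_bits_t;

/* Mask of DEV_BSIZE blocks that lie entirely inside [base, base + size). */
static vm_page_bits_t
fully_covered_bits(int base, int size)
{
	int rbase, rend;

	rbase = (base + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);	/* round up */
	rend = (base + size) & ~(DEV_BSIZE - 1);		/* round down */
	if (rend <= rbase)
		return (0);
	return (((vm_page_bits_t)2 << ((rend - 1) >> DEV_BSHIFT)) -
	    ((vm_page_bits_t)1 << (rbase >> DEV_BSHIFT)));
}

int
main(void)
{
	/* [100, 1100) fully covers only block 1 (bytes 512..1023): 0x02. */
	return (fully_covered_bits(100, 1000) == 0x02 ? 0 : 1);
}
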
2825 vm_page_clear_dirty(vm_page_t m, int base, int size)
2828 vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2838 vm_page_set_invalid(vm_page_t m, int base, int size)
2845 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
2849 bits = vm_page_bits(base, size);
2911 vm_page_is_valid(vm_page_t m, int base, int size)
2916 bits = vm_page_bits(base, size);
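
Given a mask from vm_page_bits(), the test in vm_page_is_valid() presumably reduces to checking that every bit of the mask is already set in m->valid; the listing does not show the return expression, so the containment check below is an assumption sketched in userland.

#include <stdbool.h>
#include <stdint.h>

typedef uint8_t vm_page_bits_t;

/* True if every DEV_BSIZE block covered by bits is marked valid. */
static bool
range_is_valid(vm_page_bits_t valid, vm_page_bits_t bits)
{
	return ((valid & bits) == bits);
}

int
main(void)
{
	return (range_is_valid(0x0f, 0x06) && !range_is_valid(0x0f, 0x30) ?
	    0 : 1);
}
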