Lines Matching refs:PAGE_SIZE

101 unsigned long size = PAGE_SIZE;
118 if (size != PAGE_SIZE) {
358 } while (pte++, addr += PAGE_SIZE, addr != end);
512 } while (pte++, addr += PAGE_SIZE, addr != end);
649 * @pages: pages to map (always PAGE_SIZE pages)
689 * @pages: pages to map (always PAGE_SIZE pages)
1487 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
1529 * that is bigger then PAGE_SIZE.
1769 * a) align <= PAGE_SIZE, because it does not make any sense.
1770 * All blocks(their start addresses) are at least PAGE_SIZE
1773 * specified [vstart:vend] interval and an alignment > PAGE_SIZE.
1776 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1842 unsigned int idx = (size - 1) / PAGE_SIZE;
2099 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
2457 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
2468 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
2730 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2786 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2956 va = alloc_vmap_area(size, PAGE_SIZE,
3096 size += PAGE_SIZE;
3244 page_size = PAGE_SIZE << page_order;
3477 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3482 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3488 (unsigned long)area->addr + count * PAGE_SIZE);
3592 * tracking is done per PAGE_SIZE page so as to keep the
3625 if (array_size > PAGE_SIZE) {
3635 nr_small_pages * PAGE_SIZE, array_size);
3672 area->nr_pages * PAGE_SIZE);
3700 area->nr_pages * PAGE_SIZE);
4081 num = min_t(size_t, remains, PAGE_SIZE);
4109 length = PAGE_SIZE - offset;
4400 uaddr += PAGE_SIZE;
4401 kaddr += PAGE_SIZE;
4402 size -= PAGE_SIZE;
5022 vmap_zone_size = (1 << 4) * PAGE_SIZE;
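
Several of the matches above (lines 358, 512 and 4400-4402) follow the same pattern: an address cursor is advanced in PAGE_SIZE steps until it reaches the end of the range. The sketch below is a minimal userspace illustration of that stride pattern, assuming a POSIX system; it is not kernel code, the helper name touch_pages() is made up for this example, and sysconf(_SC_PAGESIZE) stands in for the kernel's PAGE_SIZE constant.

/*
 * Userspace sketch only: walk a buffer one page at a time, echoing the
 * "addr += PAGE_SIZE" do/while loops quoted at lines 358, 512 and
 * 4400-4402 above. touch_pages() is a made-up name for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void touch_pages(unsigned char *buf, size_t size, long page_size)
{
	unsigned char *addr = buf;
	unsigned char *end = buf + size;

	/* Visit each page-sized chunk exactly once. */
	do {
		memset(addr, 0, (size_t)page_size);
		addr += page_size;
	} while (addr != end);
}

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	size_t size = 4 * (size_t)page_size;	/* whole pages only */
	unsigned char *buf = aligned_alloc((size_t)page_size, size);

	if (!buf)
		return 1;

	touch_pages(buf, size, page_size);
	printf("touched %zu bytes in %ld-byte pages\n", size, page_size);
	free(buf);
	return 0;
}

As in the kernel loops quoted above, the do/while form terminates on pointer equality, which relies on the walked range being an exact multiple of the page size so the cursor lands precisely on the end address.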