Lines Matching refs:page

291 * get populated when the corresponding cluster is created.  Because a page
293 * mbufs so that there is a 1-to-1 mapping between them. A page that never
296 * page is allocated and used for the entire object.
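Because each page maps 1-to-1 onto its cluster (and onto a fixed number of mbufs), an object's page index in the cluster map follows from plain pointer arithmetic against the map base, as line 2710 further down shows. A minimal sketch, assuming a 4KB page size; the helper name page_index_of is illustrative, not from the source:

    #include <stdint.h>

    #define NBPG 4096 /* assumed 4KB page size; the kernel defines its own */

    /*
     * Hypothetical helper: index of the page backing 'addr' within a
     * cluster map starting at 'mbutl' (mirrors line 2710's arithmetic).
     */
    static inline uintptr_t
    page_index_of(const void *mbutl, const void *addr)
    {
        return ((uintptr_t)addr - (uintptr_t)mbutl) / NBPG;
    }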
377 * Each slab controls a page of memory.
391 #define SLF_MAPPED 0x0001 /* backed by a mapped page */
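Since each slab controls exactly one page, checking whether that page is mapped reduces to testing SLF_MAPPED in the slab's flag word. A sketch of such a descriptor; only SLF_MAPPED comes from the listing, the field names are assumptions:

    #include <stdint.h>

    #define SLF_MAPPED 0x0001 /* backed by a mapped page (from line 391) */

    /* Hypothetical per-page slab descriptor. */
    struct slab_sketch {
        void     *sl_base;  /* base of the page this slab controls */
        uint16_t  sl_flags; /* SLF_* flags */
    };

    static inline int
    slab_is_mapped(const struct slab_sketch *sp)
    {
        return (sp->sl_flags & SLF_MAPPED) != 0;
    }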
1419 * This yields mcl_audit_t units, each one representing a page.
2613 vm_offset_t page = 0;
2626 * the physical page(s), our view of the cluster map may no longer
2629 * operation (including the page mapping) is serialized.
2652 page = kmem_mb_alloc(mb_map, size, large_buffer);
2659 if (large_buffer && page == 0)
2660 page = kmem_mb_alloc(mb_map, size, 0);
2662 if (page == 0) {
2664 /* If that failed, fall back to a single 4KB page request */
2666 page = kmem_mb_alloc(mb_map, size, 0);
2669 if (page == 0) {
2675 VERIFY(IS_P2ALIGNED(page, NBPG));
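Lines 2652-2669 show a three-step fallback: request physically contiguous memory for large buffers, retry without the contiguity constraint, and finally shrink the request to a single page. A sketch of that ladder, with a hypothetical allocator alloc_pages(size, contiguous) standing in for kmem_mb_alloc():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NBPG 4096 /* assumed page size */

    /* Hypothetical stand-in for kmem_mb_alloc(); returns 0 on failure. */
    extern uintptr_t alloc_pages(size_t size, bool contiguous);

    static uintptr_t
    alloc_cluster_pages(size_t size, bool large_buffer)
    {
        uintptr_t page;

        /* First try: contiguous pages for large buffers. */
        page = alloc_pages(size, large_buffer);

        /* Retry without the contiguity requirement. */
        if (large_buffer && page == 0)
            page = alloc_pages(size, false);

        /* Last resort: a single 4KB page. */
        if (page == 0)
            page = alloc_pages(NBPG, false);

        return page;
    }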
2709 for (i = 0; i < numpages; i++, page += NBPG) {
2710 ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
2711 ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
2715 * code noops and returns the input page; if there is a
2716 * mapper the appropriate I/O page is returned.
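The loop at lines 2709-2716 walks the fresh allocation one page at a time, computing each page's offset within the cluster map and looking up its physical (or mapper-translated I/O) page before zeroing it. A compressed sketch, with a hypothetical lookup_io_page() standing in for the pmap/IOMapper lookup:

    #include <stdint.h>
    #include <string.h>

    #define NBPG 4096 /* assumed page size */

    /* Hypothetical: physical or I/O-mapped page number for 'va'. */
    extern uintptr_t lookup_io_page(void *va);

    static void
    init_fresh_pages(char *page, char *mbutl, unsigned numpages)
    {
        for (unsigned i = 0; i < numpages; i++, page += NBPG) {
            uintptr_t offset = (uintptr_t)(page - mbutl) / NBPG;
            uintptr_t new_page = lookup_io_page(page);

            /* With no mapper, new_page is just the input page. */
            memset(page, 0, NBPG);

            (void)offset;
            (void)new_page; /* map bookkeeping elided in this sketch */
        }
    }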
2720 bzero((void *)(uintptr_t) page, page_size);
2725 /* Pattern-fill this fresh page */
2728 (caddr_t)page, NBPG);
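When auditing is enabled, each fresh page is stamped with a recognizable byte pattern (lines 2725-2728) so that later reads of freed memory stand out; the actual fill call is truncated in the listing. A sketch, assuming a hypothetical 64-bit free pattern:

    #include <stddef.h>
    #include <stdint.h>

    #define NBPG         4096                  /* assumed page size */
    #define FREE_PATTERN 0xdeadbeefdeadbeefULL /* hypothetical pattern */

    /* Stamp an entire page with the audit pattern. */
    static void
    pattern_fill_page(void *page)
    {
        uint64_t *p = page;
        size_t i;

        for (i = 0; i < NBPG / sizeof(uint64_t); i++)
            p[i] = FREE_PATTERN;
    }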
2731 union mbigcluster *mbc = (union mbigcluster *)page;
2733 /* One for the entire page */
2752 union m16kcluster *m16kcl = (union m16kcluster *)page;
2767 * 2nd-Nth pages' slabs are part of the first one,
2771 nsp = slab_get(((union mbigcluster *)page) + k);
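For multi-page objects such as 16KB clusters, only the first page's slab is authoritative; the slabs of the 2nd through Nth pages (lines 2767-2771) simply refer back to it. A sketch of that chaining, using a hypothetical minimal descriptor and assuming a 16KB cluster spans four 4KB pages:

    #define NPAGES_16K 4 /* assumed: 16KB cluster over four 4KB pages */

    /* Hypothetical slab with a back-pointer to its lead slab. */
    struct slab_link {
        struct slab_link *sl_head; /* lead slab; self for the first page */
    };

    static void
    chain_slabs(struct slab_link slabs[NPAGES_16K])
    {
        int k;

        slabs[0].sl_head = &slabs[0];
        for (k = 1; k < NPAGES_16K; k++)
            slabs[k].sl_head = &slabs[0]; /* 2nd-Nth share the lead */
    }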
2895 /* how many objects will we cut the page into? */
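Line 2895 sizes the carve-up: the number of objects per page is simply the page size divided by the object size, e.g. two 2KB clusters per 4KB page. As a one-liner:

    #include <stddef.h>

    #define NBPG 4096 /* assumed page size */

    /* How many objects of 'bufsize' bytes fit in one page. */
    static inline size_t
    objs_per_page(size_t bufsize)
    {
        return NBPG / bufsize; /* e.g. 4096 / 2048 == 2 clusters */
    }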
5608 /* Does the data cross one or more page boundaries? */
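The test at line 5608 can be phrased with the start address's in-page offset: the data crosses a page boundary exactly when that offset plus the length exceeds the page size. A sketch, assuming NBPG is a power of two:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NBPG 4096 /* assumed power-of-two page size */

    /* Does [data, data + len) cross one or more page boundaries? */
    static inline bool
    crosses_page(const void *data, size_t len)
    {
        return (((uintptr_t)data & (NBPG - 1)) + len) > NBPG;
    }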
6306 * For the mbuf case, find the index of the page
6308 * base address of the page. Then find out the
6309 * mbuf index relative to the page base and use
6318 * Same thing as above, but for 2KB clusters in a page.
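Lines 6306-6318 describe the audit lookup: from the object's address, find its page index, derive the page's base address, then compute the object's index relative to that base. A sketch for the mbuf case, with a hypothetical fixed-size mbuf_sketch standing in for struct mbuf:

    #include <stddef.h>
    #include <stdint.h>

    #define NBPG 4096 /* assumed page size */

    struct mbuf_sketch { char m_dat[256]; }; /* hypothetical mbuf size */

    /* Index of mbuf 'm' within its page, given the cluster map base. */
    static size_t
    mbuf_index_in_page(const void *mbutl, const struct mbuf_sketch *m)
    {
        uintptr_t base = (uintptr_t)mbutl;
        uintptr_t pgbase = base + (((uintptr_t)m - base) / NBPG) * NBPG;

        return ((uintptr_t)m - pgbase) / sizeof(struct mbuf_sketch);
    }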