Lines Matching refs:size (the number at the start of each entry is the line in the source file)

128  * The large page size "segkmem_lpsize" for kernel heap is selected in the
196 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
199 return (segkmem_alloc(vmp, size, flags));
209 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
211 return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
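
The two entry points above are thin wrappers: hat_memload_alloc() passes its flags straight through to segkmem_alloc(), while segkmem_alloc_permanent() ORs in VM_NORELOC so the pages backing the allocation are never relocated. A minimal user-space sketch of that flag-wrapper pattern (alloc_pages() and ALLOC_NORELOC are hypothetical stand-ins, not the kernel API):

    #include <stdlib.h>

    #define ALLOC_NORELOC   0x1     /* hypothetical stand-in for VM_NORELOC */

    /* stand-in for segkmem_alloc(); a real allocator would honor flags */
    static void *
    alloc_pages(size_t size, int flags)
    {
            (void)flags;
            return (malloc(size));
    }

    /* same allocator with one policy bit forced, like segkmem_alloc_permanent() */
    static void *
    alloc_pages_permanent(size_t size, int flags)
    {
            return (alloc_pages(size, flags | ALLOC_NORELOC));
    }
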
358 boot_mapin(caddr_t addr, size_t size)
364 if (page_resv(btop(size), KM_NOSLEEP) == 0)
367 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
411 boot_alloc(void *inaddr, size_t size, uint_t align)
419 size = ptob(btopr(size));
421 if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
424 if (BOP_ALLOC(bootops, addr, size, align) != addr)
427 boot_mapin((caddr_t)addr, size);
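
boot_alloc() normalizes the request to whole pages with size = ptob(btopr(size)) before asking the boot loader, and boot_mapin() then reserves pages (page_resv) and walks the range one PAGESIZE step at a time. The rounding pair in isolation, assuming 4K pages:

    #include <stdio.h>

    #define PAGESHIFT 12                    /* assuming 4K pages */
    #define PAGESIZE  (1UL << PAGESHIFT)

    /* btopr: bytes to pages, rounding up; ptob: pages back to bytes */
    static unsigned long btopr(unsigned long b) { return ((b + PAGESIZE - 1) >> PAGESHIFT); }
    static unsigned long ptob(unsigned long p) { return (p << PAGESHIFT); }

    int
    main(void)
    {
            unsigned long size = 5000;

            /* boot_alloc() applies the same normalization before allocating */
            printf("%lu -> %lu\n", size, ptob(btopr(size)));    /* 5000 -> 8192 */
            return (0);
    }
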
441 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
451 if (seg->s_as != &kas || size > seg->s_size ||
452 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
460 return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
465 npages = btopr(size);
492 hat_reserve(seg->s_as, addr, size);
509 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
513 if (seg->s_as != &kas || size > seg->s_size ||
514 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
522 return (SEGOP_SETPROT(segkp, addr, size, prot));
525 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
527 hat_chgprot(kas.a_hat, addr, size, prot);
537 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
549 return (SEGOP_CHECKPROT(segkp, addr, size, prot));
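
segkmem_fault() and segkmem_setprot() begin the same way: reject the call unless the segment is in kas and [addr, addr + size) falls entirely within [s_base, s_base + s_size), then delegate segkp ranges to the segkp driver (SEGOP_FAULT/SEGOP_SETPROT/SEGOP_CHECKPROT). The containment test in isolation (the struct name is hypothetical):

    #include <stddef.h>

    struct range { char *base; size_t len; };   /* stands in for seg->s_base/s_size */

    /* nonzero iff [addr, addr + size) lies inside the range; testing
     * size first mirrors the order of the segkmem checks and cheaply
     * rejects requests larger than the whole segment */
    static int
    range_contains(const struct range *r, char *addr, size_t size)
    {
            if (size > r->len)
                    return (0);
            return (addr >= r->base && addr + size <= r->base + r->len);
    }
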
580 segkmem_xdump_range(void *arg, void *start, size_t size)
584 caddr_t addr_end = addr + size;
596 segkmem_dump_range(void *arg, void *start, size_t size)
599 caddr_t addr_end = addr + size;
608 vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
612 segkmem_xdump_range(arg, start, size);
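
Both dump callbacks compute the same end pointer (addr_end = addr + size) and visit the span one page at a time; segkmem_dump_range() additionally compares vmem_size(heap_lp_arena, VMEM_ALLOC) against the span before deciding whether to hand the whole range to segkmem_xdump_range(). A generic per-page walk in the same shape (the callback type is hypothetical):

    #include <stddef.h>

    #define PAGESIZE 4096UL                 /* assumed base page size */

    typedef void (*page_cb)(void *arg, void *page);     /* hypothetical callback */

    /* visit every page of [start, start + size), computing the end
     * pointer exactly as the dump callbacks do */
    static void
    for_each_page(void *arg, void *start, size_t size, page_cb cb)
    {
            char *addr = start;
            char *addr_end = addr + size;

            for (; addr < addr_end; addr += PAGESIZE)
                    cb(arg, addr);
    }
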
824 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
849 return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
854 * Allocate pages to back the virtual address range [addr, addr + size).
858 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
863 pgcnt_t npages = btopr(size);
866 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
873 vmem_free(vmp, addr, size);
877 ppl = page_create_func(addr, size, vmflag, pcarg);
880 vmem_free(vmp, addr, size);
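
segkmem_xalloc() is the template for every allocator in this file: if no address was passed in, carve one from the arena (vmem_alloc), then have page_create_func() create backing pages, and on any failure vmem_free() the span so nothing leaks. The same two-phase rollback shape in user-space form (every name below is a stand-in, not the kernel API):

    #include <stdlib.h>

    /* stand-ins for vmem_alloc(), vmem_free(), and page_create_func() */
    static void *va_alloc(size_t size) { return (malloc(size)); }
    static void va_free(void *addr, size_t size) { (void)size; free(addr); }
    static int backing_create(void *addr, size_t size) { (void)addr; (void)size; return (0); }

    static void *
    xalloc(size_t size)
    {
            void *addr = va_alloc(size);            /* phase 1: address space */

            if (addr == NULL)
                    return (NULL);
            if (backing_create(addr, size) != 0) {  /* phase 2: backing pages */
                    va_free(addr, size);            /* roll back phase 1 */
                    return (NULL);
            }
            return (addr);
    }
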
928 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
948 if (gcp->gc_arena == vmp && gcp->gc_size == size) {
954 addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
955 if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
959 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
964 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
966 return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
970 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
972 return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
982 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
988 pgcnt_t npages = btopr(size);
996 gc->gc_size = size;
1002 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1004 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
1034 vmem_free(vmp, inaddr, size);
1039 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1041 segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1045 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1047 segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1051 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1053 segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
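
segkmem_alloc() and segkmem_zio_alloc() are the same path parameterized by an identity vnode (&kvp vs. &zvp), and the frees at 1045-1053 mirror them. segkmem_alloc_vn() and segkmem_free_vn() also cooperate on a boot-time garbage list: a free that arrives too early to go back to the arena is queued with its size recorded (gc->gc_size = size), and a later allocation from the same arena with the same size (gcp->gc_arena == vmp && gcp->gc_size == size) is satisfied from that queue. A minimal deferred-free queue in the same spirit (all names hypothetical):

    #include <stddef.h>

    struct gc_chunk {
            struct gc_chunk *next;
            size_t size;                /* recorded like gc->gc_size */
    };

    static struct gc_chunk *gc_list;

    /* queue a chunk instead of freeing it, reusing the chunk's own
     * memory for the list node (so size must cover the struct) */
    static void
    deferred_free(void *addr, size_t size)
    {
            struct gc_chunk *gc = addr;

            gc->size = size;
            gc->next = gc_list;
            gc_list = gc;
    }

    /* reuse a queued chunk of exactly the requested size, if any */
    static void *
    deferred_alloc(size_t size)
    {
            struct gc_chunk **pp, *gc;

            for (pp = &gc_list; (gc = *pp) != NULL; pp = &gc->next) {
                    if (gc->size == size) {
                            *pp = gc->next;
                            return (gc);
                    }
            }
            return (NULL);
    }
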
1071 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1074 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1075 hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1080 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1082 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
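
segkmem_mapin() always hat_unload()s the range before hat_devload()ing the new translation, so a stale mapping can never linger; segkmem_mapout() is just the unload half. A rough user-space analogue with POSIX mmap (the kernel works at the HAT layer, not with mmap; here MAP_FIXED gives the same replace-in-place effect in one call):

    #include <sys/mman.h>
    #include <stddef.h>

    /* replace whatever is mapped at addr with a fresh anonymous mapping;
     * MAP_FIXED discards the old pages, the effect segkmem_mapin() gets
     * from hat_unload() followed by hat_devload() */
    static void *
    remap_range(void *addr, size_t size, int prot)
    {
            return (mmap(addr, size, prot,
                MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0));
    }

    /* the segkmem_mapout() half: drop the translation */
    static int
    unmap_range(void *addr, size_t size)
    {
            return (munmap(addr, size));
    }
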
1103 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1118 return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1124 * [addr, addr + size). If addr is NULL, allocate the virtual address
1128 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1134 pgcnt_t npages = btopr(size);
1136 pgcnt_t nlpages = size >> segkmem_lpshift;
1156 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1208 vmem_free(vmp, addr, size);
1220 segkmem_free_one_lp(caddr_t addr, size_t size)
1223 pgcnt_t pgs_left = btopr(size);
1225 ASSERT(size == segkmem_lpsize);
1227 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
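
segkmem_xalloc_lp() keeps two page counts for the same span: btopr(size) base pages and size >> segkmem_lpshift large pages, and segkmem_free_one_lp() asserts it is handed exactly one large page (size == segkmem_lpsize) before unloading the mapping. A sketch of the two counts, assuming 4K base pages and 4M large pages:

    #include <assert.h>
    #include <stddef.h>

    #define PAGESHIFT 12                    /* assuming 4K base pages */
    #define LPSHIFT   22                    /* assuming 4M large pages */
    #define LPSIZE    (1UL << LPSHIFT)

    /* one span, two granularities: base pages via the page shift
     * (exact here because size is page aligned), large pages via the
     * large-page shift, as in segkmem_xalloc_lp() */
    static void
    page_counts(size_t size, size_t *npages, size_t *nlpages)
    {
            assert((size & (LPSIZE - 1)) == 0);     /* large-page multiple */
            *npages = size >> PAGESHIFT;
            *nlpages = size >> LPSHIFT;
    }
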
1248 * have to "upgrade the requested size" to kmem_lp_arena quantum. If
1256 size_t size;
1262 size = *sizep;
1268 size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1276 ASSERT(asize >= size);
1296 return (segkmem_alloc(vmp, size, vmflag));
1308 * quantum size chunk that everybody is going to
1367 lpcb->alloc_bytes_failed += size;
1374 return (segkmem_alloc(vmp, size, vmflag));
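
The lines from 1248 on are segkmem_alloc_lp(): requests are "upgraded" to the kmem_lp_arena quantum with asize = P2ROUNDUP(size, kmemlp_qnt), failures are tallied in alloc_bytes_failed, and the function falls back to plain segkmem_alloc() when large pages are unavailable. P2ROUNDUP is the usual illumos power-of-two round-up macro; a worked example (the 4M quantum is only an assumption):

    #include <stdio.h>

    /* round x up to the next multiple of a power-of-two align;
     * this is the illumos P2ROUNDUP definition */
    #define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

    int
    main(void)
    {
            unsigned long qnt = 0x400000UL;             /* assuming a 4M quantum */
            unsigned long size = 5UL * 1024 * 1024;     /* 5M request */

            /* prints 800000: the request is upgraded to two 4M quanta */
            printf("%lx\n", P2ROUNDUP(size, qnt));
            return (0);
    }
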
1378 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1381 segkmem_free(vmp, inaddr, size);
1383 vmem_free(kmem_lp_arena, inaddr, size);
1393 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1398 ASSERT(size != 0);
1407 addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1418 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1420 pgcnt_t nlpages = size >> segkmem_lpshift;
1423 pgcnt_t npages = btopr(size);
1437 vmem_free(vmp, inaddr, size);
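
segkmem_free_lp() releases by provenance: spans that came from kmem_lp_arena go back via vmem_free(kmem_lp_arena, ...), everything else through segkmem_free(); segkmem_free_lpi() likewise recomputes both page counts before its vmem_free(). A sketch of the dispatch (the provenance predicate is not among the matched lines, so the span test below is an assumption):

    #include <stddef.h>

    static char *lp_base, *lp_end;      /* hypothetical large-page arena span */

    typedef enum { FREE_SMALL, FREE_LARGE } free_path_t;

    /* FREE_LARGE corresponds to vmem_free(kmem_lp_arena, ...) in the
     * source, FREE_SMALL to segkmem_free() */
    static free_path_t
    free_path(void *addr)
    {
            char *a = addr;

            return ((a >= lp_base && a < lp_end) ? FREE_LARGE : FREE_SMALL);
    }
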
1461 /* get a platform dependent value of large page size for kernel heap */
1541 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1547 return (segkmem_alloc(vmp, size, vmflag));
1549 ASSERT((size & (ppaquantum - 1)) == 0);
1551 addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1552 if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1554 vmem_xfree(vmp, addr, size);
1562 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1569 segkmem_free(vmp, addr, size);
1571 segkmem_free(NULL, addr, size);
1572 vmem_xfree(vmp, addr, size);
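
segkmem_alloc_ppa() falls back to segkmem_alloc() when large pages are off; otherwise it asserts the request is a ppaquantum multiple ((size & (ppaquantum - 1)) == 0, valid only because the quantum is a power of two), allocates aligned space with vmem_xalloc(), and vmem_xfree()s the span again if backing pages cannot be created. The mask test in isolation:

    #include <assert.h>
    #include <stddef.h>

    /* nonzero iff size is a multiple of quantum; the mask trick is
     * only valid for a power-of-two quantum, as ppaquantum is */
    static int
    is_quantum_multiple(size_t size, size_t quantum)
    {
            assert((quantum & (quantum - 1)) == 0);
            return ((size & (quantum - 1)) == 0);
    }
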