vm_machdep.c revision 259335
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/arm/vm_machdep.c 259335 2013-12-13 21:40:12Z ian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 * struct switchframe and trapframe must both be a multiple of 8
 * for correct stack alignment.
 */
CTASSERT(sizeof(struct switchframe) == 24);
CTASSERT(sizeof(struct trapframe) == 80);
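/*
 * Explanatory note (not part of the original source): the multiple-of-8
 * requirement above follows from the ARM EABI (AAPCS), which expects the
 * stack pointer to be 8-byte aligned at procedure-call boundaries.  Since
 * trapframes and switchframes are pushed directly onto the kernel stack,
 * an odd-sized frame would misalign everything placed below it.
 */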
#ifndef ARM_USE_SMALL_ALLOC

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif /* !ARM_USE_SMALL_ALLOC */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf = (struct trapframe *)STACKALIGN(
	    pcb2->un_32.pcb32_sp - sizeof(struct trapframe));
	*tf = *td1->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;
	KASSERT((pcb2->un_32.pcb32_sp & 7) == 0,
	    ("cpu_fork: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
#ifdef ARM_TP_ADDRESS
	td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS;
#else
	td2->td_md.md_tp = (register_t) get_tls();
#endif
}
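/*
 * Explanatory sketch (derived from the code above, not part of the
 * original source): after cpu_fork() the top of the child's kernel stack
 * looks roughly like
 *
 *	td2->td_kstack + td_kstack_pages * PAGE_SIZE
 *	    struct pcb          (td2->td_pcb)
 *	    struct trapframe    (td2->td_frame, a copy of the parent's)
 *	    struct switchframe  (pcb32_sp points here)
 *
 * so that when the child is first switched to, the switchframe is
 * restored and control reaches fork_trampoline, which passes sf_r4
 * (fork_return) and sf_r5 (td2) to fork_exit() and finally returns to
 * user mode through the trapframe.
 */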
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		pmap_kremove(sf->kva);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
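/*
 * Usage sketch (illustrative only, not part of the original source): a
 * consumer such as sendfile(2) typically wires a page, maps it through an
 * sf_buf and releases the mapping when the I/O completes, along the lines
 * of
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);   (may sleep unless SFB_NOWAIT)
 *	if (sf != NULL) {
 *		... copy to or from (void *)sf_buf_kva(sf) ...
 *		sf_buf_free(sf);
 *	}
 *
 * where sf_buf_kva() comes from <sys/sf_buf.h>.
 */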
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;
	int fixup;
#ifdef __ARMEB__
	uint32_t insn;
#endif

	frame = td->td_frame;
	fixup = 0;

#ifdef __ARMEB__
	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
	if ((insn & 0x000fffff) == SYS___syscall) {
		register_t *ap = &frame->tf_r0;
		register_t code = ap[_QUAD_LOWWORD];
		if (td->td_proc->p_sysent->sv_mask)
			code &= td->td_proc->p_sysent->sv_mask;
		fixup = (code != SYS_freebsd6_lseek && code != SYS_lseek)
		    ? 1 : 0;
	}
#endif

	switch (error) {
	case 0:
		if (fixup) {
			frame->tf_r0 = 0;
			frame->tf_r1 = td->td_retval[0];
		} else {
			frame->tf_r0 = td->td_retval[0];
			frame->tf_r1 = td->td_retval[1];
		}
		frame->tf_spsr &= ~PSR_C_bit;	/* carry bit */
		break;
	case ERESTART:
		/*
		 * Reconstruct the pc to point at the swi.
		 */
		frame->tf_pc -= INSN_SIZE;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		frame->tf_r0 = error;
		frame->tf_spsr |= PSR_C_bit;	/* carry bit */
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall. Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;
	KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
	    ("cpu_set_upcall: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe));
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tp = (register_t)tls_base;
	if (td == curthread) {
		critical_enter();
#ifdef ARM_TP_ADDRESS
		*(register_t *)ARM_TP_ADDRESS = (register_t)tls_base;
#else
		set_tls((void *)tls_base);
#endif
		critical_exit();
	}
	return (0);
}
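/*
 * Explanatory note (not part of the original source): the two branches
 * above reflect how the TLS pointer reaches userland on ARM.  On cores
 * without a usable hardware thread register (the ARM_TP_ADDRESS case) the
 * kernel publishes the pointer at a fixed user-readable address that the
 * threading library loads directly; on newer cores set_tls() places it in
 * the per-thread hardware register (typically TPIDRURO), so no memory
 * word needs updating.  cpu_fork() above picks up the same value for the
 * child through the matching #ifdef.
 */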
void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	/*
	 * Ensure td_frame is aligned to an 8 byte boundary as it will be
	 * placed into the stack pointer which must be 8 byte aligned in
	 * the ARM EABI.
	 */
	td->td_frame = (struct trapframe *)STACKALIGN((u_int)td->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(struct pcb) -
	    sizeof(struct trapframe));
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
	    ("cpu_set_fork_handler: Incorrect stack alignment"));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define	S_FRAME	L1_SUP_FRAME
#define	S_SIZE	L1_SUP_SIZE
#else
#define	S_FRAME	L1_S_FRAME
#define	S_SIZE	L1_S_SIZE
#endif

vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early ?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0,
	    ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}
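/*
 * Worked example (hypothetical values, not part of the original source):
 * with a single physical segment dump_avail[] = { 0x10000000, 0x14000000,
 * 0, 0 } and a section-aligned start address, the loop above never
 * advances addr, so
 *
 *	arm_ptovirt(0x10042000) == alloc_firstaddr + 0x42000
 *
 * i.e. the direct map built by arm_init_smallalloc() below is a simple
 * linear offset within each segment.
 */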
void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole memory and not just the memory available to the VM
	 * to be able to do a pa => va association for any address.
	 */
	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;
		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write-back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_arena, bytes,
			    M_NOWAIT));
			return (ret);
		}
		pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
		for (;;) {
			m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}
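/*
 * Explanatory note (not part of the original source): uma_small_free()
 * below undoes the three cases above.  Allocations tagged UMA_SLAB_KMEM
 * go back through kmem_free(); addresses at or above KERNBASE came from
 * the pre-registered small-page pools and their descriptors are re-queued
 * on the appropriate list; anything below KERNBASE lives in the
 * arm_ptovirt() direct map, so the underlying page is unwired and freed.
 */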
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_arena, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif