vm_machdep.c revision 195779
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/vm_machdep.c 195779 2009-07-20 07:53:07Z raj $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

#ifndef ARM_USE_SMALL_ALLOC
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
	if (td2->td_altkstack)
		pmap_use_minicache(td2->td_altkstack, td2->td_altkstack_pages *
		    PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}
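/*
 * Usage sketch (editor's illustration; `m' is a vm_page_t and `buf' a
 * destination buffer, both hypothetical and not defined in this file):
 * a consumer such as the sendfile(2) path pairs sf_buf_alloc() with
 * sf_buf_free() to get a temporary kernel mapping for a page:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf != NULL) {
 *		bcopy((void *)sf_buf_kva(sf), buf, PAGE_SIZE);
 *		sf_buf_free(sf);
 *	}
 *
 * sf_buf_alloc() may sleep and returns NULL if interrupted by a signal;
 * with SFB_NOWAIT it returns NULL immediately when no buffer is free.
 */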
/*
 * Detach the mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
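/*
 * Note (editor's annotation): a buffer whose reference count drops to
 * zero is appended to the free list but deliberately keeps its sf->m
 * and hash linkage, so a later sf_buf_alloc() for the same page can
 * revive the old mapping without calling pmap_kenter() again; the
 * mapping is only replaced when the buffer is recycled for a
 * different page.
 */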
/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if (td != curthread)
		td->td_md.md_tp = tls_base;
	else {
		critical_enter();
		*(void **)ARM_TP_ADDRESS = tls_base;
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#define BITS_PER_INT	(8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];
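/*
 * Worked example (editor's annotation; the 1 MB figure is illustrative,
 * the real window size is ARM_NOCACHE_KVA_SIZE): with 4 KB pages and
 * 32-bit ints, a 1 MB uncached window covers 256 pages, so the bitmap
 * above needs 256 / 32 = 8 ints.  Page i is tested and set with:
 *
 *	arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i % BITS_PER_INT))
 *	arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i % BITS_PER_INT)
 */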
/*
 * Functions to map and unmap memory non-cached into KVA that the kernel
 * won't try to allocate.  The goal is to provide uncached memory to busdma,
 * to honor BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is deliberately simple: each page is represented by a bit
 * in a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

	size = round_page(size);
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
		vm_offset_t vaddr = (vm_offset_t)addr;

		vaddr = vaddr & ~PAGE_MASK;
		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
			cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
			pmap_kenter_nocache(tomap, physaddr);
			cpu_tlb_flushID_SE(vaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}

	return (NULL);
}

void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++)
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
    TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
    TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
    TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME	L1_SUP_FRAME
#define S_SIZE	L1_SUP_SIZE
#else
#define S_FRAME	L1_S_FRAME
#define S_SIZE	L1_S_SIZE
#endif

vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early ?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0, ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}
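/*
 * Editor's note on the walk above: dump_avail[] holds (start, end)
 * physical address pairs, terminated by a zero entry.  The regions are
 * mapped back-to-back below KERNBASE by arm_init_smallalloc(), so the
 * virtual address of `pa' is alloc_firstaddr plus the section-rounded
 * sizes of all regions preceding the one containing `pa', plus pa's
 * offset within its own region.
 */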
void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map all of the memory and not just the memory available to the VM,
	 * to be able to do a pa => va association for any address.
	 */
	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;
		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	static vm_pindex_t color;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up the page tables as write-back, there's
	 * no need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
			pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
		else
			pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
		if (wait & M_ZERO)
			pflags |= VM_ALLOC_ZERO;
		for (;;) {
			m = vm_page_alloc(NULL, color++,
			    pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}
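/*
 * Editor's note: uma_small_alloc() above and uma_small_free() below are
 * the machine-dependent single-page backend that UMA calls instead of
 * going through kmem_map (compiled in here under ARM_USE_SMALL_ALLOC).
 * uma_small_free() undoes the two allocation paths above: pool pages
 * (addresses at or above KERNBASE) get a descriptor back on the matching
 * queue, while direct-mapped pages that came from vm_page_alloc() are
 * unwired and freed.
 */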
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif