vm_machdep.c revision 172189
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c      7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/vm_machdep.c 172189 2007-09-15 18:47:02Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

#ifndef NSFBUFS
#define NSFBUFS         (512 + maxusers * 16)
#endif

#ifndef ARM_USE_SMALL_ALLOC
static void     sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers.
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list.
 */
static struct mtx sf_buf_lock;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
        struct pcb *pcb1, *pcb2;
        struct trapframe *tf;
        struct switchframe *sf;
        struct mdproc *mdp2;

        if ((flags & RFPROC) == 0)
                return;
        pcb1 = td1->td_pcb;
        pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages *
            PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
        pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
        if (td2->td_altkstack)
                pmap_use_minicache(td2->td_altkstack,
                    td2->td_altkstack_pages * PAGE_SIZE);
#endif
#endif
        td2->td_pcb = pcb2;
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
        mdp2 = &p2->p_md;
        bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
        pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
        pcb2->un_32.pcb32_sp = td2->td_kstack +
            USPACE_SVC_STACK_TOP - sizeof(*pcb2);
        pmap_activate(td2);
        td2->td_frame = tf =
            (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
        *tf = *td1->td_frame;
        sf = (struct switchframe *)tf - 1;
        sf->sf_r4 = (u_int)fork_return;
        sf->sf_r5 = (u_int)td2;
        sf->sf_pc = (u_int)fork_trampoline;
        tf->tf_spsr &= ~PSR_C_bit;
        tf->tf_r0 = 0;
        tf->tf_r1 = 0;
        pcb2->un_32.pcb32_sp = (u_int)sf;

        /* Set up to release the spin count in fork_exit(). */
        td2->td_md.md_spinlock_count = 1;
        td2->td_md.md_saved_cspr = 0;
        td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}
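
/*
 * For reference, a rough sketch of what cpu_fork() lays out at the top of
 * the child's kernel stack (addresses descending; derived from the
 * assignments above, not an authoritative memory map):
 *
 *      td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE:
 *              struct pcb (pcb2, copied from the parent)
 *      td2->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb):
 *              struct trapframe (tf, a copy of the parent's user frame)
 *              struct switchframe (sf)
 *      pcb2->un_32.pcb32_sp == (u_int)sf
 *
 * When the child is first switched to, fork_trampoline() runs with r4/r5
 * restored from sf and hands them to fork_exit(), which invokes
 * fork_return(td2); tf_r0 == 0 is what fork(2) returns in the child.
 */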

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach the mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
        mtx_lock(&sf_buf_lock);
        sf->ref_count--;
        if (sf->ref_count == 0) {
                TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
                nsfbufsused--;
                if (sf_buf_alloc_want > 0)
                        wakeup_one(&sf_buf_freelist);
        }
        mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
        struct sf_buf *sf_bufs;
        vm_offset_t sf_base;
        int i;

        nsfbufs = NSFBUFS;
        TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

        sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
        TAILQ_INIT(&sf_buf_freelist);
        sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
        sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
            M_NOWAIT | M_ZERO);
        for (i = 0; i < nsfbufs; i++) {
                sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
                TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
        }
        sf_buf_alloc_want = 0;
        mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
        return ((struct sf_buf *)m);
#else
        struct sf_head *hash_list;
        struct sf_buf *sf;
        int error;

        hash_list = &sf_buf_active[SF_BUF_HASH(m)];
        mtx_lock(&sf_buf_lock);
        LIST_FOREACH(sf, hash_list, list_entry) {
                if (sf->m == m) {
                        sf->ref_count++;
                        if (sf->ref_count == 1) {
                                TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
                                nsfbufsused++;
                                nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
                        }
                        goto done;
                }
        }
        while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
                if (flags & SFB_NOWAIT)
                        goto done;
                sf_buf_alloc_want++;
                mbstat.sf_allocwait++;
                error = msleep(&sf_buf_freelist, &sf_buf_lock,
                    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
                sf_buf_alloc_want--;

                /*
                 * If we got a signal, don't risk going back to sleep.
                 */
                if (error)
                        goto done;
        }
        TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
        if (sf->m != NULL)
                LIST_REMOVE(sf, list_entry);
        LIST_INSERT_HEAD(hash_list, sf, list_entry);
        sf->ref_count = 1;
        sf->m = m;
        nsfbufsused++;
        nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
        pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
        mtx_unlock(&sf_buf_lock);
        return (sf);
#endif
}
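
/*
 * Typical use of the pair above, roughly what the sendfile(2) path does
 * (a hedged sketch; the surrounding mbuf plumbing is omitted and the page
 * "m" is assumed to be wired and valid):
 *
 *      struct sf_buf *sf;
 *
 *      sf = sf_buf_alloc(m, SFB_CATCH);
 *      if (sf != NULL) {
 *              ... access the page through sf_buf_kva(sf) ...
 *              sf_buf_free(sf);
 *      }
 *
 * sf_buf_alloc() may sleep unless SFB_NOWAIT is passed, and returns NULL
 * on failure or on a caught signal; sf_buf_kva() (declared alongside
 * struct sf_buf) yields the KVA that pmap_kenter() established above.
 * With ARM_USE_SMALL_ALLOC the sf_buf is the vm_page itself and no
 * per-buffer mapping is set up here.
 */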

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
        struct trapframe *tf;
        struct switchframe *sf;

        bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
        bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
        tf = td->td_frame;
        sf = (struct switchframe *)tf - 1;
        sf->sf_r4 = (u_int)fork_return;
        sf->sf_r5 = (u_int)td;
        sf->sf_pc = (u_int)fork_trampoline;
        tf->tf_spsr &= ~PSR_C_bit;
        tf->tf_r0 = 0;
        td->td_pcb->un_32.pcb32_sp = (u_int)sf;
        td->td_pcb->un_32.pcb32_und_sp = td->td_kstack +
            USPACE_UNDEF_STACK_TOP;

        /* Set up to release the spin count in fork_exit(). */
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
        struct trapframe *tf = td->td_frame;

        /* Keep the new user stack pointer 8-byte aligned. */
        tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
            - sizeof(struct trapframe)) & ~7;
        tf->tf_pc = (int)entry;
        tf->tf_r0 = (int)arg;
        tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

        if (td != curthread)
                td->td_md.md_tp = tls_base;
        else {
                critical_enter();
                *(void **)ARM_TP_ADDRESS = tls_base;
                critical_exit();
        }
        return (0);
}
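
/*
 * Userland reaches cpu_set_user_tls() through sysarch(2).  A minimal
 * sketch of the calling side, assuming the ARM_SET_TP operation from
 * <machine/sysarch.h> and a hypothetical thread-control block (both
 * illustration only):
 *
 *      static char tcb[TCB_SIZE];      (TCB_SIZE is made up here)
 *
 *      sysarch(ARM_SET_TP, tcb);
 *
 * On CPUs without a TLS register, a thread reads the current pointer
 * back from the magic ARM_TP_ADDRESS page that the kernel keeps up to
 * date, as the curthread case above shows.
 */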

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_setup(struct thread *td)
{
        td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
            PAGE_SIZE) - 1;
        td->td_frame = (struct trapframe *)
            ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP -
            sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
        pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
        struct switchframe *sf;
        struct trapframe *tf;

        tf = td->td_frame;
        sf = (struct switchframe *)tf - 1;
        sf->sf_r4 = (u_int)func;
        sf->sf_r5 = (u_int)arg;
        td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

        if (busdma_swi_pending)
                busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#define BITS_PER_INT    (8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];

/*
 * Functions to map and unmap memory non-cached into KVA the kernel won't try
 * to allocate.  The goal is to provide uncached memory to busdma, to honor
 * BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is very simple: each page is represented by a bit in
 * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
        int i, j;

        size = round_page(size);
        for (i = 0; i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * BITS_PER_INT),
            ARM_TP_ADDRESS); i++) {
                if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
                    BITS_PER_INT)))) {
                        for (j = i; j < i + (size / (PAGE_SIZE)); j++)
                                if (arm_nocache_allocated[j / BITS_PER_INT] &
                                    (1 << (j % BITS_PER_INT)))
                                        break;
                        if (j == i + (size / (PAGE_SIZE)))
                                break;
                }
        }
        if (i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * BITS_PER_INT),
            ARM_TP_ADDRESS)) {
                vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
                void *ret = (void *)tomap;
                vm_paddr_t physaddr = vtophys((vm_offset_t)addr);

                for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
                    physaddr += PAGE_SIZE, i++) {
                        pmap_kenter_nocache(tomap, physaddr);
                        arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
                            BITS_PER_INT);
                }
                return (ret);
        }
        return (NULL);
}

void
arm_unmap_nocache(void *addr, vm_size_t size)
{
        vm_offset_t raddr = (vm_offset_t)addr;
        int i;

        size = round_page(size);
        i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
        for (; size > 0; size -= PAGE_SIZE, i++)
                arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
                    BITS_PER_INT));
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
        TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
        TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
        TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME L1_SUP_FRAME
#define S_SIZE  L1_SUP_SIZE
#else
#define S_FRAME L1_S_FRAME
#define S_SIZE  L1_S_SIZE
#endif

vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
        int i;
        vm_offset_t addr = alloc_firstaddr;

        KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early?"));
        for (i = 0; dump_avail[i + 1]; i += 2) {
                if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
                        break;
                addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
                    (dump_avail[i] & S_FRAME);
        }
        KASSERT(dump_avail[i + 1] != 0,
            ("Trying to access invalid physical address"));
        return (addr + (pa - (dump_avail[i] & S_FRAME)));
}
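
/*
 * A worked example of the translation arm_ptovirt() performs (the physical
 * ranges are invented for illustration): with
 *
 *      dump_avail[] = { 0x10000000, 0x10800000, 0x20000000, 0x20400000,
 *                       0, 0 };
 *
 * the two ranges are laid out back to back starting at alloc_firstaddr,
 * each padded to section granularity, so pa 0x10003000 translates to
 * alloc_firstaddr + 0x3000, and addresses in the second range follow at
 * alloc_firstaddr plus the section-rounded size of the first.
 * arm_init_smallalloc() below creates exactly this layout.
 */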

void
arm_init_smallalloc(void)
{
        vm_offset_t to_map = 0, mapaddr;
        int i;

        /*
         * We need to use dump_avail and not phys_avail, since we want to
         * map the whole of memory and not just the memory available to the
         * VM, to be able to do a pa => va association for any address.
         */
        for (i = 0; dump_avail[i + 1]; i += 2) {
                to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
                    (dump_avail[i] & S_FRAME);
        }
        alloc_firstaddr = mapaddr = KERNBASE - to_map;
        for (i = 0; dump_avail[i + 1]; i += 2) {
                vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
                    S_SIZE - (dump_avail[i] & S_FRAME);
                vm_offset_t did = 0;

                while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
                        pmap_kenter_supersection(mapaddr,
                            (dump_avail[i] & L1_SUP_FRAME) + did,
                            SECTION_CACHE);
#else
                        pmap_kenter_section(mapaddr,
                            (dump_avail[i] & L1_S_FRAME) + did,
                            SECTION_CACHE);
#endif
                        mapaddr += S_SIZE;
                        did += S_SIZE;
                        size -= S_SIZE;
                }
        }
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
        struct arm_small_page *pg;

        bytes &= ~PAGE_MASK;
        while (bytes > 0) {
                pg = (struct arm_small_page *)list;
                pg->addr = mem;
                if (pagetable)
                        TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
                else
                        TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
                list = (char *)list + sizeof(*pg);
                mem = (char *)mem + PAGE_SIZE;
                bytes -= PAGE_SIZE;
        }
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
        void *ret;
        struct arm_small_page *sp;
        TAILQ_HEAD(,arm_small_page) *head;
        static vm_pindex_t color;
        vm_page_t m;

        *flags = UMA_SLAB_PRIV;
        /*
         * For CPUs where we set up page tables as write back, there's no
         * need to maintain two separate pools.
         */
        if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
                head = (void *)&pages_wt;
        else
                head = (void *)&pages_normal;

        mtx_lock(&smallalloc_mtx);
        sp = TAILQ_FIRST(head);

        if (!sp) {
                int pflags;

                mtx_unlock(&smallalloc_mtx);
                if (zone == l2zone &&
                    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
                        *flags = UMA_SLAB_KMEM;
                        ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
                        return (ret);
                }
                if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
                        pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
                else
                        pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
                if (wait & M_ZERO)
                        pflags |= VM_ALLOC_ZERO;
                for (;;) {
                        m = vm_page_alloc(NULL, color++,
                            pflags | VM_ALLOC_NOOBJ);
                        if (m == NULL) {
                                if (wait & M_NOWAIT)
                                        return (NULL);
                                VM_WAIT;
                        } else
                                break;
                }
                ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
                if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        bzero(ret, PAGE_SIZE);
                return (ret);
        }
        TAILQ_REMOVE(head, sp, pg_list);
        TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
        ret = sp->addr;
        mtx_unlock(&smallalloc_mtx);
        if ((wait & M_ZERO))
                bzero(ret, bytes);
        return (ret);
}
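
/*
 * uma_small_alloc() above and uma_small_free() below are the UMA backend
 * hooks used when ARM_USE_SMALL_ALLOC is defined: UMA calls them for slab
 * pages instead of mapping memory itself.  The allocation flow, in rough
 * pseudo-code (a summary of the code above, not additional behavior):
 *
 *      head = (l2zone needing write-through PTEs) ? pages_wt : pages_normal;
 *      if a preloaded descriptor is on head
 *              return its boot-time address;
 *      else if the write-through pool is exhausted
 *              fall back to kmem_malloc();
 *      else
 *              vm_page_alloc() a wired page and return
 *              arm_ptovirt(VM_PAGE_TO_PHYS(m)) from the window built by
 *              arm_init_smallalloc().
 */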

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
        pd_entry_t *pd;
        pt_entry_t *pt;

        if (flags & UMA_SLAB_KMEM)
                kmem_free(kmem_map, (vm_offset_t)mem, size);
        else {
                struct arm_small_page *sp;

                if ((vm_offset_t)mem >= KERNBASE) {
                        mtx_lock(&smallalloc_mtx);
                        sp = TAILQ_FIRST(&free_pgdesc);
                        KASSERT(sp != NULL, ("No more free page descriptor?"));
                        TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
                        sp->addr = mem;
                        pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
                            &pt);
                        if ((*pd & pte_l1_s_cache_mask) ==
                            pte_l1_s_cache_mode_pt &&
                            pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
                                TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
                        else
                                TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
                        mtx_unlock(&smallalloc_mtx);
                } else {
                        vm_page_t m;
                        vm_paddr_t pa = vtophys((vm_offset_t)mem);

                        m = PHYS_TO_VM_PAGE(pa);
                        m->wire_count--;
                        vm_page_free(m);
                        atomic_subtract_int(&cnt.v_wire_count, 1);
                }
        }
}

#endif