vm_machdep.c revision 163674
1/*-
2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department, and William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
40 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/arm/arm/vm_machdep.c 163674 2006-10-24 23:27:52Z cognet $");
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/kernel.h>
49#include <sys/malloc.h>
50#include <sys/mbuf.h>
51#include <sys/proc.h>
52#include <sys/socketvar.h>
53#include <sys/sf_buf.h>
54#include <sys/unistd.h>
55#include <machine/cpu.h>
56#include <machine/pcb.h>
57#include <machine/sysarch.h>
58#include <vm/vm.h>
59#include <vm/pmap.h>
60#include <sys/lock.h>
61#include <sys/mutex.h>
62
63#include <vm/vm.h>
64#include <vm/vm_extern.h>
65#include <vm/vm_kern.h>
66#include <vm/vm_page.h>
67#include <vm/vm_map.h>
68#include <vm/vm_param.h>
69#include <vm/vm_pageout.h>
70#include <vm/uma.h>
71#include <vm/uma_int.h>
72
73#ifndef NSFBUFS
74#define NSFBUFS		(512 + maxusers * 16)
75#endif
76
77#ifndef ARM_USE_SMALL_ALLOC
78static void     sf_buf_init(void *arg);
79SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
80
81LIST_HEAD(sf_head, sf_buf);
82
83
84/*
85 * A hash table of active sendfile(2) buffers
86 */
87static struct sf_head *sf_buf_active;
88static u_long sf_buf_hashmask;
89
90#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)
91
92static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
93static u_int    sf_buf_alloc_want;
94
95/*
96 * A lock used to synchronize access to the hash table and free list
97 */
98static struct mtx sf_buf_lock;
99#endif
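/*
 * A minimal lookup sketch for the structures above (illustrative only,
 * assuming the caller already holds a wired reference on the page "m"):
 * the bucket index is simply the page's position in vm_page_array, masked
 * with the power-of-two-minus-one mask handed back by hashinit(9).
 *
 *	struct sf_head *bucket = &sf_buf_active[SF_BUF_HASH(m)];
 *	struct sf_buf *sf;
 *
 *	LIST_FOREACH(sf, bucket, list_entry)
 *		if (sf->m == m)
 *			break;		existing mapping, can be reused
 */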
100
101/*
102 * Finish a fork operation, with process p2 nearly set up.
103 * Copy and update the pcb, and set up the stack so that the child is
104 * ready to run and return to user mode.
105 */
106void
107cpu_fork(register struct thread *td1, register struct proc *p2,
108    struct thread *td2, int flags)
109{
110	struct pcb *pcb1, *pcb2;
111	struct trapframe *tf;
112	struct switchframe *sf;
113	struct mdproc *mdp2;
114
115	if ((flags & RFPROC) == 0)
116		return;
117	pcb1 = td1->td_pcb;
118	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
119#ifdef __XSCALE__
120	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
121	if (td2->td_altkstack)
122		pmap_use_minicache(td2->td_altkstack, td2->td_altkstack_pages *
123		    PAGE_SIZE);
124#endif
125	td2->td_pcb = pcb2;
126	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
127	mdp2 = &p2->p_md;
128	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
129	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
130	pcb2->un_32.pcb32_sp = td2->td_kstack +
131	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
132	pmap_activate(td2);
133	td2->td_frame = tf =
134	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
135	*tf = *td1->td_frame;
136	sf = (struct switchframe *)tf - 1;
137	sf->sf_r4 = (u_int)fork_return;
138	sf->sf_r5 = (u_int)td2;
139	sf->sf_pc = (u_int)fork_trampoline;
140	tf->tf_spsr &= ~PSR_C_bit;
141	tf->tf_r0 = 0;
142	tf->tf_r1 = 0;
143	pcb2->un_32.pcb32_sp = (u_int)sf;
144
145	/* Setup to release sched_lock in fork_exit(). */
146	td2->td_md.md_spinlock_count = 1;
147	td2->td_md.md_saved_cspr = 0;
148	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
149}
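/*
 * Rough layout of the child's kernel stack after cpu_fork(), from high to
 * low addresses (a sketch; the exact offsets come from USPACE_SVC_STACK_TOP
 * and the structure sizes):
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE
 *		struct pcb		(td2->td_pcb, copied from td1)
 *	td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)
 *		struct trapframe	(td2->td_frame, copy of td1's frame)
 *		struct switchframe	(sf, restored by cpu_switch())
 *
 * fork_trampoline() is expected to pass sf_r4/sf_r5 to fork_exit() as the
 * (function, argument) pair, which is why fork_return and td2 are stashed
 * there.
 */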
150
151void
152cpu_thread_swapin(struct thread *td)
153{
154}
155
156void
157cpu_thread_swapout(struct thread *td)
158{
159}
160
161/*
162 * Detach the mapped page and release resources back to the system.
163 */
164void
165sf_buf_free(struct sf_buf *sf)
166{
167#ifndef ARM_USE_SMALL_ALLOC
168	mtx_lock(&sf_buf_lock);
169	sf->ref_count--;
170	if (sf->ref_count == 0) {
171		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
172		nsfbufsused--;
173		if (sf_buf_alloc_want > 0)
174			wakeup_one(&sf_buf_freelist);
175	}
176	mtx_unlock(&sf_buf_lock);
177#endif
178}
179
180#ifndef ARM_USE_SMALL_ALLOC
181/*
182 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
183 */
184static void
185sf_buf_init(void *arg)
186{
187	struct sf_buf *sf_bufs;
188	vm_offset_t sf_base;
189	int i;
190
191	nsfbufs = NSFBUFS;
192	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
193
194	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
195	TAILQ_INIT(&sf_buf_freelist);
196	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
197	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
198	    M_NOWAIT | M_ZERO);
199	for (i = 0; i < nsfbufs; i++) {
200		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
201		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
202	}
203	sf_buf_alloc_want = 0;
204	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
205}
206#endif
207
208/*
209 * Get an sf_buf from the freelist. Will block if none are available.
210 */
211struct sf_buf *
212sf_buf_alloc(struct vm_page *m, int flags)
213{
214#ifdef ARM_USE_SMALL_ALLOC
215	return ((struct sf_buf *)m);
216#else
217	struct sf_head *hash_list;
218	struct sf_buf *sf;
219	int error;
220
221	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
222	mtx_lock(&sf_buf_lock);
223	LIST_FOREACH(sf, hash_list, list_entry) {
224		if (sf->m == m) {
225			sf->ref_count++;
226			if (sf->ref_count == 1) {
227				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
228				nsfbufsused++;
229				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
230			}
231			goto done;
232		}
233	}
234	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
235		if (flags & SFB_NOWAIT)
236			goto done;
237		sf_buf_alloc_want++;
238		mbstat.sf_allocwait++;
239		error = msleep(&sf_buf_freelist, &sf_buf_lock,
240		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
241		sf_buf_alloc_want--;
242
243
244		/*
245		 * If we got a signal, don't risk going back to sleep.
246		 */
247		if (error)
248			goto done;
249	}
250	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
251	if (sf->m != NULL)
252		LIST_REMOVE(sf, list_entry);
253	LIST_INSERT_HEAD(hash_list, sf, list_entry);
254	sf->ref_count = 1;
255	sf->m = m;
256	nsfbufsused++;
257	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
258	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
259done:
260	mtx_unlock(&sf_buf_lock);
261	return (sf);
262#endif
263}
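/*
 * Typical usage from a sendfile(2)-style consumer (a minimal sketch;
 * sf_buf_kva() is the accessor that accompanies struct sf_buf in the
 * machine sf_buf.h header, and "m" is assumed to be a wired vm_page_t):
 *
 *	struct sf_buf *sf;
 *	char *va;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf == NULL)
 *		return (EINTR);		interrupted while waiting for a buffer
 *	va = (char *)sf_buf_kva(sf);
 *	... access the page contents through va ...
 *	sf_buf_free(sf);
 */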
264
265/*
266 * Initialize machine state (pcb and trap frame) for a new thread about to
267 * upcall. Put enough state in the new thread's PCB to get it to go back to
268 * userret(), where we can intercept it again to set the return (upcall)
269 * address and stack, along with those from upcalls that come from other
270 * sources, such as those generated in thread_userret() itself.
271 */
272void
273cpu_set_upcall(struct thread *td, struct thread *td0)
274{
275	struct trapframe *tf;
276	struct switchframe *sf;
277
278	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
279	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
280	tf = td->td_frame;
281	sf = (struct switchframe *)tf - 1;
282	sf->sf_r4 = (u_int)fork_return;
283	sf->sf_r5 = (u_int)td;
284	sf->sf_pc = (u_int)fork_trampoline;
285	tf->tf_spsr &= ~PSR_C_bit;
286	tf->tf_r0 = 0;
287	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
288	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;
289
290	/* Setup to release sched_lock in fork_exit(). */
291	td->td_md.md_spinlock_count = 1;
292	td->td_md.md_saved_cspr = 0;
293}
294
295/*
296 * Set the machine state for performing an upcall that has to
297 * be done in thread_userret() so that those upcalls generated
298 * in thread_userret() itself can be done as well.
299 */
300void
301cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
302	stack_t *stack)
303{
304	struct trapframe *tf = td->td_frame;
305
306	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
307	    - sizeof(struct trapframe)) & ~7;
308	tf->tf_pc = (int)entry;
309	tf->tf_r0 = (int)arg;
310	tf->tf_spsr = PSR_USR32_MODE;
311}
312
313int
314cpu_set_user_tls(struct thread *td, void *tls_base)
315{
316
317	if (td != curthread)
318		td->td_md.md_tp = tls_base;
319	else {
320		critical_enter();
321		*(void **)ARM_TP_ADDRESS = tls_base;
322		critical_exit();
323	}
324	return (0);
325}
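/*
 * For a thread that is not running, the value parks in md_tp and is expected
 * to be published to the magic location on the next context switch; for
 * curthread it is stored straight into ARM_TP_ADDRESS, inside a critical
 * section so a preemption cannot slip in between the curthread check and the
 * store.  Reading the value back would follow the same split, e.g. this
 * hypothetical helper (not part of this file):
 *
 *	void *
 *	cpu_get_user_tls(struct thread *td)
 *	{
 *		if (td != curthread)
 *			return ((void *)td->td_md.md_tp);
 *		return (*(void **)ARM_TP_ADDRESS);
 *	}
 */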
326
327void
328cpu_thread_exit(struct thread *td)
329{
330}
331
332void
333cpu_thread_setup(struct thread *td)
334{
335	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
336	    PAGE_SIZE) - 1;
337	td->td_frame = (struct trapframe *)
338	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
339#ifdef __XSCALE__
340	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
341#endif
342}
343
344void
345cpu_thread_clean(struct thread *td)
346{
347}
348
349/*
350 * Intercept the return address from a freshly forked process that has NOT
351 * been scheduled yet.
352 *
353 * This is needed to make kernel threads stay in kernel mode.
354 */
355void
356cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
357{
358	struct switchframe *sf;
359	struct trapframe *tf;
360
361	tf = td->td_frame;
362	sf = (struct switchframe *)tf - 1;
363	sf->sf_r4 = (u_int)func;
364	sf->sf_r5 = (u_int)arg;
365	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
366}
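/*
 * A sketch of the usual caller: kthread_create(9) forks the new kernel
 * process and then uses this function to point sf_r4/sf_r5 at the supplied
 * entry point, so that fork_trampoline() ends up in fork_exit(func, arg,
 * frame) and the new thread never returns to user mode (function and
 * argument names below are illustrative only):
 *
 *	error = kthread_create(my_kthread_main, sc, &p, 0, 0, "mykthread");
 */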
367
368/*
369 * Software interrupt handler for queued VM system processing.
370 */
371void
372swi_vm(void *dummy)
373{
374}
375
376void
377cpu_exit(struct thread *td)
378{
379}
380
381#define BITS_PER_INT	(8 * sizeof(int))
382vm_offset_t arm_nocache_startaddr;
383static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
384    BITS_PER_INT)];
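/*
 * The bitmap above holds one bit per page of the nocache window:
 * ARM_NOCACHE_KVA_SIZE / PAGE_SIZE bits packed into ints of BITS_PER_INT
 * bits each.  For example (hypothetical numbers), a 1MB window with 4KB
 * pages gives 256 pages, i.e. 256 bits, stored in 256 / 32 = 8 ints on a
 * machine with 32-bit ints.
 */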
385
386/*
387 * Functions to map and unmap memory non-cached, into KVA that the kernel
388 * won't otherwise try to allocate. The goal is to provide uncached memory
389 * to busdma, in order to honor BUS_DMA_COHERENT.
390 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
391 * The allocator is rather simplistic: each page is represented by a bit in
392 * a bitfield, 0 meaning the page is not allocated and 1 meaning it is.
393 * As soon as it finds enough contiguous free pages to satisfy the request,
394 * it returns their starting address.
395 */
396void *
397arm_remap_nocache(void *addr, vm_size_t size)
398{
399	int i, j;
400
401	size = round_page(size);
402	for (i = 0; i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * BITS_PER_INT),
403	    ARM_TP_ADDRESS); i++) {
404		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
405		    BITS_PER_INT)))) {
406			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
407				if (arm_nocache_allocated[j / BITS_PER_INT] &
408				    (1 << (j % BITS_PER_INT)))
409					break;
410			if (j == i + (size / (PAGE_SIZE)))
411				break;
412		}
413	}
414	if (i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * BITS_PER_INT),
415	    ARM_TP_ADDRESS)) {
416		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
417		void *ret = (void *)tomap;
418		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
419
420		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
421		    physaddr += PAGE_SIZE, i++) {
422			pmap_kenter_nocache(tomap, physaddr);
423			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
424			    BITS_PER_INT);
425		}
426		return (ret);
427	}
428	return (NULL);
429}
430
431void
432arm_unmap_nocache(void *addr, vm_size_t size)
433{
434	vm_offset_t raddr = (vm_offset_t)addr;
435	int i;
436
437	size = round_page(size);
438	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
439	for (; size > 0; size -= PAGE_SIZE, i++)
440		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
441		    BITS_PER_INT));
442}
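/*
 * Expected usage from a busdma-style consumer (a sketch; the memory behind
 * "vaddr" must already be allocated and physically contiguous for "size"
 * bytes, since arm_remap_nocache() only remaps from vtophys(vaddr) onward):
 *
 *	void *uncached;
 *
 *	uncached = arm_remap_nocache(vaddr, size);
 *	if (uncached == NULL)
 *		return (ENOMEM);	no room left in the nocache KVA window
 *	... use "uncached" for data that must honor BUS_DMA_COHERENT ...
 *	arm_unmap_nocache(uncached, size);
 */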
443
444#ifdef ARM_USE_SMALL_ALLOC
445
446static TAILQ_HEAD(,arm_small_page) pages_normal =
447	TAILQ_HEAD_INITIALIZER(pages_normal);
448static TAILQ_HEAD(,arm_small_page) pages_wt =
449	TAILQ_HEAD_INITIALIZER(pages_wt);
450static TAILQ_HEAD(,arm_small_page) free_pgdesc =
451	TAILQ_HEAD_INITIALIZER(free_pgdesc);
452
453extern uma_zone_t l2zone;
454
455struct mtx smallalloc_mtx;
456
457MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");
458
459static vm_offset_t alloc_firstaddr;
460
461vm_offset_t
462arm_ptovirt(vm_paddr_t pa)
463{
464	int i;
465	vm_offset_t addr = alloc_firstaddr;
466
467	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early?"));
468	for (i = 0; dump_avail[i + 1]; i += 2) {
469		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
470			break;
471		addr += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
472		    (dump_avail[i] & L1_S_FRAME);
473	}
474	KASSERT(dump_avail[i + 1] != 0, ("Trying to access invalid physical address"));
475	return (addr + (pa - (dump_avail[i] & L1_S_FRAME)));
476}
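/*
 * A worked example (hypothetical addresses): with a single dump_avail chunk
 * starting at the section-aligned physical address 0xa0000000, the loop
 * breaks at i = 0 with addr still equal to alloc_firstaddr, so
 * arm_ptovirt(0xa0123456) returns alloc_firstaddr + 0x123456, assuming
 * L1_S_FRAME masks to 1MB sections.
 */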
477
478void
479arm_init_smallalloc(void)
480{
481	vm_offset_t to_map = 0, mapaddr;
482	int i;
483
484	/*
485	 * We need to use dump_avail and not phys_avail, since we want to
486	 * map all of physical memory, not just the memory available to the VM,
487	 * so that we can do a pa => va association for any address.
488	 */
489
490	for (i = 0; dump_avail[i + 1]; i += 2) {
491		to_map += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
492		    (dump_avail[i] & L1_S_FRAME);
493	}
494	alloc_firstaddr = mapaddr = KERNBASE - to_map;
495	for (i = 0; dump_avail[i + 1]; i += 2) {
496		vm_offset_t size = (dump_avail[i + 1] & L1_S_FRAME) +
497		    L1_S_SIZE - (dump_avail[i] & L1_S_FRAME);
498		vm_offset_t did = 0;
499		while (size > 0) {
500			pmap_kenter_section(mapaddr,
501			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
502			mapaddr += L1_S_SIZE;
503			did += L1_S_SIZE;
504			size -= L1_S_SIZE;
505		}
506	}
507}
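/*
 * A worked example of the sizing above (hypothetical board): suppose a
 * single dump_avail chunk at 0xa0000000 whose section-rounded size comes
 * out to 64MB (assuming 1MB L1 sections), and KERNBASE at the conventional
 * 0xc0000000.  Then to_map is 64MB, alloc_firstaddr becomes 0xbc000000, and
 * the loop direct-maps physical 0xa0000000 onward at 0xbc000000 onward,
 * which is the mapping arm_ptovirt() walks.
 */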
508
509void
510arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
511{
512	struct arm_small_page *pg;
513
514	bytes &= ~PAGE_MASK;
515	while (bytes > 0) {
516		pg = (struct arm_small_page *)list;
517		pg->addr = mem;
518		if (pagetable)
519			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
520		else
521			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
522		list = (char *)list + sizeof(*pg);
523		mem = (char *)mem + PAGE_SIZE;
524		bytes -= PAGE_SIZE;
525	}
526}
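/*
 * The early pmap bootstrap is expected to feed this with a preallocated
 * array of page descriptors and an already-mapped chunk of KVA, one
 * descriptor per page, e.g. (names are illustrative only):
 *
 *	struct arm_small_page *descs;	one descriptor per page in the chunk
 *
 *	arm_add_smallalloc_pages(descs, chunk_va, chunk_bytes, 0);
 *
 * Passing 1 as the last argument queues the pages on the write-through list
 * reserved for page-table allocations instead.
 */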
527
528void *
529uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
530{
531	void *ret;
532	struct arm_small_page *sp;
533	TAILQ_HEAD(,arm_small_page) *head;
534	static vm_pindex_t color;
535	vm_page_t m;
536
537	*flags = UMA_SLAB_PRIV;
538	/*
539	 * For CPUs where we set up page tables as write-back, there's no
540	 * need to maintain two separate pools.
541	 */
542	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
543		head = (void *)&pages_wt;
544	else
545		head = (void *)&pages_normal;
546
547	mtx_lock(&smallalloc_mtx);
548	sp = TAILQ_FIRST(head);
549
550	if (!sp) {
551		int pflags;
552
553		mtx_unlock(&smallalloc_mtx);
554		if (zone == l2zone &&
555		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
556			*flags = UMA_SLAB_KMEM;
557			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
558			return (ret);
559		}
560		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
561			pflags = VM_ALLOC_INTERRUPT;
562		else
563			pflags = VM_ALLOC_SYSTEM;
564		if (wait & M_ZERO)
565			pflags |= VM_ALLOC_ZERO;
566		for (;;) {
567			m = vm_page_alloc(NULL, color++,
568			    pflags | VM_ALLOC_NOOBJ);
569			if (m == NULL) {
570				if (wait & M_NOWAIT)
571					return (NULL);
572				VM_WAIT;
573			} else
574				break;
575		}
576		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
577		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
578			bzero(ret, PAGE_SIZE);
579		return (ret);
580	}
581	TAILQ_REMOVE(head, sp, pg_list);
582	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
583	ret = sp->addr;
584	mtx_unlock(&smallalloc_mtx);
585	if ((wait & M_ZERO))
586		bzero(ret, bytes);
587	return (ret);
588}
589
590void
591uma_small_free(void *mem, int size, u_int8_t flags)
592{
593	pd_entry_t *pd;
594	pt_entry_t *pt;
595
596	if (flags & UMA_SLAB_KMEM)
597		kmem_free(kmem_map, (vm_offset_t)mem, size);
598	else {
599		struct arm_small_page *sp;
600
601		if ((vm_offset_t)mem >= KERNBASE) {
602			mtx_lock(&smallalloc_mtx);
603			sp = TAILQ_FIRST(&free_pgdesc);
604			KASSERT(sp != NULL, ("No more free page descriptors?"));
605			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
606			sp->addr = mem;
607			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
608			    &pt);
609			if ((*pd & pte_l1_s_cache_mask) ==
610			    pte_l1_s_cache_mode_pt &&
611			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
612				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
613			else
614				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
615			mtx_unlock(&smallalloc_mtx);
616		} else {
617			vm_page_t m;
618			vm_paddr_t pa = vtophys((vm_offset_t)mem);
619
620			m = PHYS_TO_VM_PAGE(pa);
621			vm_page_lock_queues();
622			vm_page_free(m);
623			vm_page_unlock_queues();
624		}
625	}
626}
627
628#endif
629