vm_machdep.c revision 166063
1129198Scognet/*-
2129198Scognet * Copyright (c) 1982, 1986 The Regents of the University of California.
3129198Scognet * Copyright (c) 1989, 1990 William Jolitz
4129198Scognet * Copyright (c) 1994 John Dyson
5129198Scognet * All rights reserved.
6129198Scognet *
7129198Scognet * This code is derived from software contributed to Berkeley by
8129198Scognet * the Systems Programming Group of the University of Utah Computer
9129198Scognet * Science Department, and William Jolitz.
10129198Scognet *
11150868Scognet * Redistribution and use in source and binary forms, with or without
12129198Scognet * modification, are permitted provided that the following conditions
13129198Scognet * are met:
14129198Scognet * 1. Redistributions of source code must retain the above copyright
15129198Scognet *    notice, this list of conditions and the following disclaimer.
16129198Scognet * 2. Redistributions in binary form must reproduce the above copyright
17129198Scognet *    notice, this list of conditions and the following disclaimer in the
18129198Scognet *    documentation and/or other materials provided with the distribution.
19129198Scognet * 3. All advertising materials mentioning features or use of this software
20129198Scognet *    must display the following acknowledgement:
21129198Scognet *	This product includes software developed by the University of
22129198Scognet *	California, Berkeley and its contributors.
23129198Scognet * 4. Neither the name of the University nor the names of its contributors
24129198Scognet *    may be used to endorse or promote products derived from this software
25129198Scognet *    without specific prior written permission.
26129198Scognet *
27129198Scognet * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28129198Scognet * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29129198Scognet * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30129198Scognet * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31129198Scognet * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32129198Scognet * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33129198Scognet * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34129198Scognet * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35129198Scognet * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36129198Scognet * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37129198Scognet * SUCH DAMAGE.
38129198Scognet *
39129198Scognet *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
40129198Scognet *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41129198Scognet */
42129198Scognet
43129198Scognet#include <sys/cdefs.h>
44129198Scognet__FBSDID("$FreeBSD: head/sys/arm/arm/vm_machdep.c 166063 2007-01-17 00:53:05Z cognet $");
45129198Scognet
46129198Scognet#include <sys/param.h>
47129198Scognet#include <sys/systm.h>
48129198Scognet#include <sys/kernel.h>
49129198Scognet#include <sys/malloc.h>
50129198Scognet#include <sys/mbuf.h>
51129198Scognet#include <sys/proc.h>
52129198Scognet#include <sys/socketvar.h>
53129198Scognet#include <sys/sf_buf.h>
54146599Scognet#include <sys/unistd.h>
55129198Scognet#include <machine/cpu.h>
56129198Scognet#include <machine/pcb.h>
57145433Sdavidxu#include <machine/sysarch.h>
58129198Scognet#include <vm/vm.h>
59129198Scognet#include <vm/pmap.h>
60129198Scognet#include <sys/lock.h>
61129198Scognet#include <sys/mutex.h>
62129198Scognet
63129198Scognet#include <vm/vm.h>
64129198Scognet#include <vm/vm_extern.h>
65129198Scognet#include <vm/vm_kern.h>
66129198Scognet#include <vm/vm_page.h>
67129198Scognet#include <vm/vm_map.h>
68129198Scognet#include <vm/vm_param.h>
69161105Scognet#include <vm/vm_pageout.h>
70147114Scognet#include <vm/uma.h>
71147114Scognet#include <vm/uma_int.h>
72129198Scognet
73166063Scognet#include <machine/md_var.h>
74166063Scognet
#ifndef NSFBUFS
/* Default number of sendfile(2) buffers; scales with the maxusers tunable. */
#define NSFBUFS		(512 + maxusers * 16)
#endif

#ifndef ARM_USE_SMALL_ALLOC
static void     sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

LIST_HEAD(sf_head, sf_buf);


/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

/* Hash a vm_page into sf_buf_active by its index in vm_page_array. */
#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)

/* Buffers with a zero reference count, kept around (still mapped) for reuse. */
static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
/* Number of threads sleeping in sf_buf_alloc() waiting for a free buffer. */
static u_int    sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif
102129198Scognet
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	/* Nothing to do unless an actual new process is being created. */
	if ((flags & RFPROC) == 0)
		return;
	pcb1 = td1->td_pcb;
	/* The new pcb lives at the very top of the new thread's kernel stack. */
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
	if (td2->td_altkstack)
		pmap_use_minicache(td2->td_altkstack, td2->td_altkstack_pages *
		    PAGE_SIZE);
#endif
	td2->td_pcb = pcb2;
	/* Clone the parent's pcb and machine-dependent proc data. */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	/* Set up the per-mode kernel stack pointers inside the new kstack. */
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	/* Copy the parent's trapframe so the child resumes at the same PC. */
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
	/*
	 * Build a switchframe below the trapframe so that cpu_switch()
	 * starts the child in fork_trampoline(), which will call
	 * fork_return(td2) before returning to user mode.
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	/* The child's fork() returns 0 with the carry bit cleared. */
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release sched_lock in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	/* Inherit the parent's current TLS pointer. */
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}
152129198Scognet
/* Machine-dependent hook run when a thread's stack is swapped in; no-op on ARM. */
void
cpu_thread_swapin(struct thread *td)
{
}
157129198Scognet
/* Machine-dependent hook run when a thread's stack is swapped out; no-op on ARM. */
void
cpu_thread_swapout(struct thread *td)
{
}
162129198Scognet
/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	 mtx_lock(&sf_buf_lock);
	 sf->ref_count--;
	 if (sf->ref_count == 0) {
		 /* Keep the buffer (and its mapping) cached on the free list. */
		 TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		 nsfbufsused--;
		 /* Wake one thread sleeping in sf_buf_alloc(), if any. */
		 if (sf_buf_alloc_want > 0)
			 wakeup_one(&sf_buf_freelist);
	 }
	 mtx_unlock(&sf_buf_lock);
#endif
}
181129198Scognet
#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	/* Honor the kern.ipc.nsfbufs tunable, defaulting to NSFBUFS. */
	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	/* Reserve KVA only; pages are entered later by sf_buf_alloc(). */
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	/* Hand each buffer one page of the reserved KVA and free-list it. */
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif
209129198Scognet
/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	/* With the small allocator every page is already mapped. */
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	/* If some sf_buf already maps this page, reuse it. */
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				/* It was cached on the free list; reclaim it. */
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	/*
	 * Otherwise take the oldest free buffer, sleeping for one if none
	 * is available (unless SFB_NOWAIT, in which case NULL is returned).
	 */
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;


		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	/* Evict the buffer's previous page from the hash table, if any. */
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	/* Map the new page at this buffer's KVA. */
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
266129198Scognet
/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall. Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other sources
 * such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	/* Start from a copy of the creating thread's frame and pcb. */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	/*
	 * Arrange for the thread to start in fork_trampoline(), which will
	 * call fork_return(td) and then return to user mode.
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release sched_lock in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}
296129198Scognet
/*
 * Set that machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	/*
	 * Place the user stack pointer below the top of the given stack,
	 * leaving room for a trapframe and keeping 8-byte alignment.
	 */
	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	/* Enter user mode at entry(arg). */
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}
314129198Scognet
/*
 * Set the TLS base pointer for a thread.  Always returns 0 (success).
 */
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if (td != curthread)
		/* Not running: just record it; it is loaded on switch-in. */
		td->td_md.md_tp = tls_base;
	else {
		/* Running on this CPU: publish at the magic TLS address now. */
		critical_enter();
		*(void **)ARM_TP_ADDRESS = tls_base;
		critical_exit();
	}
	return (0);
}
328145433Sdavidxu
/* Machine-dependent per-thread exit cleanup; nothing to do on ARM. */
void
cpu_thread_exit(struct thread *td)
{
}
333129198Scognet
/*
 * Initialize the pcb and trapframe pointers of a newly created thread.
 */
void
cpu_thread_setup(struct thread *td)
{
	/* The pcb sits at the very top of the thread's kernel stack. */
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	/* The trapframe sits just below the SVC stack top, under the pcb. */
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
	/* Have the XScale mini-cache back the kernel stack. */
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif

}
/* Machine-dependent thread recycling hook; nothing to do on ARM. */
void
cpu_thread_clean(struct thread *td)
{
}
350129198Scognet
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	/* Rewrite the switchframe so fork_trampoline() calls func(arg). */
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}
369129198Scognet
/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	/* Run deferred busdma work, if any was queued. */
	if (busdma_swi_pending)
		busdma_swi();
}
380129198Scognet
/* Machine-dependent process exit hook; nothing to do on ARM. */
void
cpu_exit(struct thread *td)
{
}
385147114Scognet
#define BITS_PER_INT	(8 * sizeof(int))
/* Base of the uncached KVA window (initialized outside this file). */
vm_offset_t arm_nocache_startaddr;
/* Allocation bitmap: one bit per page of the window, 1 = allocated. */
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];

/*
 * Functions to map and unmap memory non-cached into KVA the kernel won't try
 * to allocate. The goal is to provide uncached memory to busdma, to honor
 * BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is rather dummy, each page is represented by a bit in
 * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
401156191Scognetvoid *
402156191Scognetarm_remap_nocache(void *addr, vm_size_t size)
403156191Scognet{
404156191Scognet	int i, j;
405156191Scognet
406156191Scognet	size = round_page(size);
407156199Scognet	for (i = 0; i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * BITS_PER_INT),
408156191Scognet	    ARM_TP_ADDRESS); i++) {
409156199Scognet		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
410156199Scognet		    BITS_PER_INT)))) {
411156191Scognet			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
412156199Scognet				if (arm_nocache_allocated[j / BITS_PER_INT] &
413156199Scognet				    (1 << (j % BITS_PER_INT)))
414156191Scognet					break;
415156191Scognet			if (j == i + (size / (PAGE_SIZE)))
416156191Scognet				break;
417156191Scognet		}
418156191Scognet	}
419156199Scognet	if (i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * BITS_PER_INT),
420156191Scognet	    ARM_TP_ADDRESS)) {
421156191Scognet		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
422156191Scognet		void *ret = (void *)tomap;
423156191Scognet		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
424156191Scognet
425156191Scognet		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
426156191Scognet		    physaddr += PAGE_SIZE, i++) {
427156191Scognet			pmap_kenter_nocache(tomap, physaddr);
428156199Scognet			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
429156199Scognet			    BITS_PER_INT);
430156191Scognet		}
431156191Scognet		return (ret);
432156191Scognet	}
433156191Scognet	return (NULL);
434156191Scognet}
435156191Scognet
/*
 * Release a range obtained from arm_remap_nocache().  Only the allocation
 * bitmap is cleared here; the page-table entries are not torn down and
 * will simply be overwritten by the next allocation of the same pages.
 */
void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	/* Index of the first page within the nocache window. */
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++)
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
}
448156191Scognet
#ifdef ARM_USE_SMALL_ALLOC

/* Preloaded free pages for ordinary allocations. */
static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
/* Preloaded free pages for page-table use (separately cached mappings). */
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
/* Recycled page descriptors available for reuse by uma_small_free(). */
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

/* Protects the three page lists above. */
struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");

/* First VA of the direct map built by arm_init_smallalloc(). */
vm_offset_t alloc_firstaddr;
465147114Scognet
466161105Scognetvm_offset_t
467161105Scognetarm_ptovirt(vm_paddr_t pa)
468161105Scognet{
469161105Scognet	int i;
470161105Scognet	vm_offset_t addr = alloc_firstaddr;
471147114Scognet
472161105Scognet	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called to early ?"));
473163674Scognet	for (i = 0; dump_avail[i + 1]; i += 2) {
474161105Scognet		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
475161105Scognet			break;
476161105Scognet		addr += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
477161105Scognet		    (dump_avail[i] & L1_S_FRAME);
478161105Scognet	}
479163674Scognet	KASSERT(dump_avail[i + 1] != 0, ("Trying to access invalid physical address"));
480161105Scognet	return (addr + (pa - (dump_avail[i] & L1_S_FRAME)));
481161105Scognet}
482161105Scognet
/*
 * Build a section-granular direct mapping of all physical memory just
 * below KERNBASE, enabling pa <=> va translation (see arm_ptovirt())
 * without per-page mappings.
 */
void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole memory and not just the memory available to the VM
	 * to be able to do a pa => va association for any address.
	 */

	/* First pass: total size to map, each range rounded to L1 sections. */
	for (i = 0; dump_avail[i + 1]; i+= 2) {
		to_map += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
		    (dump_avail[i] & L1_S_FRAME);
	}
	/* The direct map ends exactly at KERNBASE. */
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	/* Second pass: enter one cached section mapping per L1 section. */
	for (i = 0; dump_avail[i + 1]; i+= 2) {
		vm_offset_t size = (dump_avail[i + 1] & L1_S_FRAME) +
		    L1_S_SIZE - (dump_avail[i] & L1_S_FRAME);
		vm_offset_t did = 0;
		while (size > 0 ) {
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
			mapaddr += L1_S_SIZE;
			did += L1_S_SIZE;
			size -= L1_S_SIZE;
		}
	}
}
513161105Scognet
514161105Scognetvoid
515147114Scognetarm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
516147114Scognet{
517147114Scognet	struct arm_small_page *pg;
518147114Scognet
519156191Scognet	bytes &= ~PAGE_MASK;
520147114Scognet	while (bytes > 0) {
521147114Scognet		pg = (struct arm_small_page *)list;
522147114Scognet		pg->addr = mem;
523147114Scognet		if (pagetable)
524147114Scognet			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
525147114Scognet		else
526147114Scognet			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
527147114Scognet		list = (char *)list + sizeof(*pg);
528147114Scognet		mem = (char *)mem + PAGE_SIZE;
529147114Scognet		bytes -= PAGE_SIZE;
530147114Scognet	}
531147114Scognet}
532147114Scognet
/*
 * UMA small-allocation backend: return one page, taken from the
 * preloaded page pools when possible, otherwise straight from the VM
 * page queues through the direct map.
 */
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	static vm_pindex_t color;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we setup page tables as write back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		/* Preloaded pool is empty; fall back to the VM. */
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			/* L2 page-table pages come from kmem in this case. */
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		/* Translate malloc(9) wait flags into vm_page_alloc flags. */
		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
			pflags = VM_ALLOC_INTERRUPT;
		else
			pflags = VM_ALLOC_SYSTEM;
		if (wait & M_ZERO)
			pflags |= VM_ALLOC_ZERO;
		/* Retry (waiting for memory) unless the caller said M_NOWAIT. */
		for (;;) {
			m = vm_page_alloc(NULL, color++,
			    pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		/* Address the page through the direct map. */
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	/* Got a preloaded page; recycle its descriptor onto free_pgdesc. */
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}
594147114Scognet
/*
 * UMA small-allocation free backend: return a page obtained from
 * uma_small_alloc() to the pool it came from.
 */
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		/* Came from kmem_malloc() (the l2zone fallback path). */
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

		if ((vm_offset_t)mem >= KERNBASE) {
			/*
			 * Above KERNBASE means a preloaded page: grab a free
			 * descriptor and put the page back on the list that
			 * matches its current L1 cacheability.
			 */
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			/* Direct-mapped page: hand it back to the VM. */
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			vm_page_lock_queues();
			vm_page_free(m);
			vm_page_unlock_queues();
		}
	}
}
632147114Scognet
633147114Scognet#endif
634