vm_machdep.c revision 245942
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/vm_machdep.c 245942 2013-01-26 08:55:04Z andrew $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

#ifndef ARM_USE_SMALL_ALLOC
static void     sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

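/*
 * The hash key is a page's index within vm_page_array; hashinit()
 * returns a power-of-two-sized table, so masking with sf_buf_hashmask
 * makes the AND below a cheap modulo.
 */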
#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int    sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
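	/*
	 * Build a switchframe just below the trap frame.  fork_trampoline()
	 * (swtch.S) hands r4 and r5 to fork_exit(), so the child starts in
	 * fork_return(td2).  Clearing the carry bit and zeroing r0/r1 makes
	 * the copied trapframe report a successful fork() to the child.
	 */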
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
#ifdef ARM_TP_ADDRESS
	td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS;
#else
	td2->td_md.md_tp = (register_t) get_tls();
#endif
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach the mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		pmap_kremove(sf->kva);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
#endif
}

#ifndef ARM_USE_SMALL_ALLOC
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
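	/*
	 * Reserve nsfbufs page-sized slots of KVA with no backing pages;
	 * sf_buf_alloc() maps a physical page into a slot on demand.
	 */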
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
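	/*
	 * With the small-alloc direct map every managed page already has
	 * a permanent kernel mapping, so there is no transient buffer to
	 * set up; the vm_page pointer itself serves as the sf_buf.
	 */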
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
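	/* If the page already has an active sf_buf, reuse its mapping. */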
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	trapframe_t *frame;
	int fixup;
#ifdef __ARMEB__
	uint32_t insn;
#endif

	frame = td->td_frame;
	fixup = 0;

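	/*
	 * __syscall(2) takes the syscall number as a 64-bit argument and is
	 * declared to return a 64-bit value.  On big-endian ARM, when the
	 * real return type is only 32 bits wide (anything but the lseek
	 * family), the result belongs in the low-order word of the r0/r1
	 * pair, i.e. r1, with the high word in r0 zeroed.
	 */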
#ifdef __ARMEB__
	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
	if ((insn & 0x000fffff) == SYS___syscall) {
		register_t *ap = &frame->tf_r0;
		register_t code = ap[_QUAD_LOWWORD];
		if (td->td_proc->p_sysent->sv_mask)
			code &= td->td_proc->p_sysent->sv_mask;
		fixup = (code != SYS_freebsd6_lseek && code != SYS_lseek)
		    ? 1 : 0;
	}
#endif

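	/*
	 * The ARM syscall convention signals failure through the PSR carry
	 * flag: clear on success, set on error with the errno value in r0.
	 */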
	switch (error) {
	case 0:
		if (fixup) {
			frame->tf_r0 = 0;
			frame->tf_r1 = td->td_retval[0];
		} else {
			frame->tf_r0 = td->td_retval[0];
			frame->tf_r1 = td->td_retval[1];
		}
		frame->tf_spsr &= ~PSR_C_bit;   /* carry bit */
		break;
	case ERESTART:
		/*
		 * Reconstruct the pc to point at the swi.
		 */
		frame->tf_pc -= INSN_SIZE;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		frame->tf_r0 = error;
		frame->tf_spsr |= PSR_C_bit;    /* carry bit */
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall. Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

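	/*
	 * Start the user stack just below the top of the supplied stack
	 * area, rounded down to an 8-byte boundary as the ARM EABI
	 * requires of the stack pointer.
	 */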
	tf->tf_usr_sp = ((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tp = (register_t)tls_base;
	if (td == curthread) {
		critical_enter();
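		/*
		 * Cores without a TLS register publish the thread pointer
		 * at a fixed, always-readable magic address
		 * (ARM_TP_ADDRESS); newer cores hand it to the hardware
		 * via set_tls().
		 */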
#ifdef ARM_TP_ADDRESS
		*(register_t *)ARM_TP_ADDRESS = (register_t)tls_base;
#else
		set_tls((void *)tls_base);
#endif
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
	/*
	 * Ensure td_frame is aligned to an 8 byte boundary as it will be
	 * placed into the stack pointer which must be 8 byte aligned in
	 * the ARM EABI.
	 */
	td->td_frame = (struct trapframe *)((u_int)td->td_frame & ~7);
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}

#define BITS_PER_INT	(8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];

/*
 * Functions to map and unmap memory non-cached, in a range of KVA the
 * kernel won't otherwise try to allocate. The goal is to provide uncached
 * memory to busdma, to honor BUS_DMA_COHERENT.
 * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is deliberately simple: each page is represented by a bit
 * in a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
 * it returns the address.
 */
void *
arm_remap_nocache(void *addr, vm_size_t size)
{
	int i, j;

	size = round_page(size);
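	/* Scan the bitmap for a run of size / PAGE_SIZE free pages. */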
	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
		    BITS_PER_INT)))) {
			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
				if (arm_nocache_allocated[j / BITS_PER_INT] &
				    (1 << (j % BITS_PER_INT)))
					break;
			if (j == i + (size / (PAGE_SIZE)))
				break;
		}
	}
	if (i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE) {
		vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
		void *ret = (void *)tomap;
		vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
		vm_offset_t vaddr = (vm_offset_t) addr;

		vaddr = vaddr & ~PAGE_MASK;
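		/*
		 * Write back and invalidate the existing cached mapping of
		 * each page before installing the uncached alias, so no
		 * stale dirty lines remain; PIPT L2 caches are flushed by
		 * physical address, VIPT ones by virtual address.
		 */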
		for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
		    vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
			cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
#ifdef ARM_L2_PIPT
			cpu_l2cache_wbinv_range(physaddr, PAGE_SIZE);
#else
			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
#endif
			pmap_kenter_nocache(tomap, physaddr);
			cpu_tlb_flushID_SE(vaddr);
			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
			    BITS_PER_INT);
		}
		return (ret);
	}

	return (NULL);
}

void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++) {
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
		    BITS_PER_INT));
		pmap_kremove(raddr);
		raddr += PAGE_SIZE;
	}
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

vm_offset_t alloc_firstaddr;

#ifdef ARM_HAVE_SUPERSECTIONS
#define S_FRAME	L1_SUP_FRAME
#define S_SIZE	L1_SUP_SIZE
#else
#define S_FRAME	L1_S_FRAME
#define S_SIZE	L1_S_SIZE
#endif

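/*
 * Translate a physical address to its virtual alias in the direct map set
 * up by arm_init_smallalloc(): walk dump_avail[], adding up the
 * section-rounded size of every region that precedes the one containing pa.
 */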
vm_offset_t
arm_ptovirt(vm_paddr_t pa)
{
	int i;
	vm_offset_t addr = alloc_firstaddr;

	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early ?"));
	for (i = 0; dump_avail[i + 1]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
	KASSERT(dump_avail[i + 1] != 0, ("Trying to access invalid physical address"));
	return (addr + (pa - (dump_avail[i] & S_FRAME)));
}

void
arm_init_smallalloc(void)
{
	vm_offset_t to_map = 0, mapaddr;
	int i;

	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole of memory, not just the memory available to the VM,
	 * so that we can do a pa => va association for any address.
	 */

	for (i = 0; dump_avail[i + 1]; i += 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
	}
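	/*
	 * Place the direct map immediately below KERNBASE and populate it
	 * with cacheable section (or supersection) mappings of every
	 * dump_avail region, in order.
	 */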
	alloc_firstaddr = mapaddr = KERNBASE - to_map;
	for (i = 0; dump_avail[i + 1]; i += 2) {
		vm_offset_t size = (dump_avail[i + 1] & S_FRAME) +
		    S_SIZE - (dump_avail[i] & S_FRAME);
		vm_offset_t did = 0;
		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
			did += S_SIZE;
			size -= S_SIZE;
		}
	}
}

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
	struct arm_small_page *pg;

	bytes &= ~PAGE_MASK;
	while (bytes > 0) {
		pg = (struct arm_small_page *)list;
		pg->addr = mem;
		if (pagetable)
			TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
		else
			TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
		list = (char *)list + sizeof(*pg);
		mem = (char *)mem + PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write-back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
			return (ret);
		}
		pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
		for (;;) {
			m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
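		/*
		 * No pmap work is needed here: the new page is reachable
		 * through the permanent section mappings, via arm_ptovirt().
		 */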
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	pd_entry_t *pd;
	pt_entry_t *pt;

	if (flags & UMA_SLAB_KMEM)
		kmem_free(kmem_map, (vm_offset_t)mem, size);
	else {
		struct arm_small_page *sp;

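		/*
		 * Addresses at or above KERNBASE came from the boot-time
		 * pools seeded by arm_add_smallalloc_pages(); hang a free
		 * descriptor back on the matching pool.  Addresses below
		 * KERNBASE are direct-map pages, returned to the VM system.
		 */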
		if ((vm_offset_t)mem >= KERNBASE) {
			mtx_lock(&smallalloc_mtx);
			sp = TAILQ_FIRST(&free_pgdesc);
			KASSERT(sp != NULL, ("No more free page descriptor ?"));
			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
			sp->addr = mem;
			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
			    &pt);
			if ((*pd & pte_l1_s_cache_mask) ==
			    pte_l1_s_cache_mode_pt &&
			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
			else
				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
			mtx_unlock(&smallalloc_mtx);
		} else {
			vm_page_t m;
			vm_paddr_t pa = vtophys((vm_offset_t)mem);

			m = PHYS_TO_VM_PAGE(pa);
			m->wire_count--;
			vm_page_free(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
	}
}

#endif