/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/vm_machdep.c 144637 2005-04-04 21:53:56Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <machine/cpu.h>
#include <machine/pcb.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static void     sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

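/*
 * A page's bucket is its index in vm_page_array masked by sf_buf_hashmask,
 * so a given page always hashes to the same bucket and sf_buf_alloc() can
 * find and reuse an existing mapping of it.
 */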
#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int    sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	pcb1 = td1->td_pcb;
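	/* The new pcb lives just below the top of td2's kernel stack. */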
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
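
	/*
	 * Build a switchframe immediately below the trapframe.  When
	 * cpu_switch() first runs the child it "returns" to
	 * fork_trampoline(), which calls fork_exit() with the function in
	 * r4 (fork_return) and its argument in r5 (td2).
	 */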
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
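
	/*
	 * In the child, fork() returns 0: clear the PSR carry bit (no
	 * syscall error) and zero the return-value registers.
	 */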
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release sched_lock in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach the mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
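		/*
		 * The KVA mapping and sf->m are left intact, so a later
		 * request for the same page can find this sf_buf in the
		 * hash and skip the pmap_qenter() in sf_buf_alloc().
		 */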
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
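	/*
	 * Reserve a contiguous range of KVA with no backing pages; physical
	 * pages are wired in later, one at a time, by pmap_qenter() in
	 * sf_buf_alloc().
	 */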
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the free list.  May block if none are available,
 * unless SFB_NOWAIT was passed, in which case NULL is returned.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
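	/*
	 * Reuse an existing mapping of this page if there is one; a buffer
	 * with a zero reference count may still be mapped and sitting on
	 * the free list.
	 */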
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
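
	/*
	 * Recycle the buffer at the head of the free list: unhook it from
	 * the page it previously mapped, if any, and map the new page at
	 * the buffer's fixed KVA.
	 */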
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_qenter(sf->kva, &sf->m, 1);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
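	/*
	 * As in cpu_fork(), arrange for the new thread to resume in
	 * fork_trampoline() and enter fork_return() with a zero return
	 * value and no error indication.
	 */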
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + td->td_kstack_pages
	    * PAGE_SIZE + USPACE_UNDEF_STACK_TOP;

	/* Setup to release sched_lock in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
	struct trapframe *tf = td->td_frame;

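	/*
	 * Start the upcall at ku_func with ku_mailbox as its argument, in
	 * user mode, on a user stack pointer that leaves room for a
	 * trapframe and is kept 8-byte aligned.
	 */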
	tf->tf_usr_sp = ((int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size
	    - sizeof(struct trapframe)) & ~7;
	tf->tf_pc = (int)ku->ku_func;
	tf->tf_r0 = (int)ku->ku_mailbox;
	tf->tf_spsr = PSR_USR32_MODE;
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_setup(struct thread *td)
{
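	/*
	 * Carve the pcb from the top of the kernel stack and place the
	 * initial trapframe just below the SVC stack top.
	 */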
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)
	    ((u_int)td->td_kstack + td->td_kstack_pages * PAGE_SIZE +
	     USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
#ifdef __XSCALE__
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

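	/*
	 * Rewrite the switchframe built by cpu_fork() so fork_trampoline()
	 * hands control to func(arg) instead of fork_return(); for kernel
	 * threads, func typically never returns to user mode.
	 */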
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
}

void
cpu_exit(struct thread *td)
{
}