vm_machdep.c revision 123929
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/ia64/ia64/vm_machdep.c 123929 2003-12-28 08:57:09Z silby $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <i386/include/psl.h>

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

/*
 * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, bundling
 * the sf_freelist head with the sf_lock mutex that protects it.
 */
static struct {
	SLIST_HEAD(, sf_buf) sf_head;
	struct mtx sf_lock;
} sf_freelist;

static u_int	sf_buf_alloc_want;

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

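/*
 * Carve the pcb and the trapframe out of the top of the new thread's
 * kernel stack: the pcb sits at the very top and the trapframe ends up
 * immediately below it.
 */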
void
cpu_thread_setup(struct thread *td)
{
	intptr_t sp;

	sp = td->td_kstack + KSTACK_PAGES * PAGE_SIZE;
	sp -= sizeof(struct pcb);
	td->td_pcb = (struct pcb *)sp;
	sp -= sizeof(struct trapframe);
	td->td_frame = (struct trapframe *)sp;
	td->td_frame->tf_length = sizeof(struct trapframe);
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{

	ia64_highfp_save(td);
}

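/*
 * Initialize the machine state of thread td, using the state of td0 as
 * a template. The frame becomes a plain syscall frame with an empty
 * dirty register area and the syscall return values of a child (see
 * cpu_fork() below), and the pcb is aimed at the fork trampoline so
 * that the thread enters fork_return() on its first dispatch.
 */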
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb;
	struct trapframe *tf;

	tf = td->td_frame;
	KASSERT(tf != NULL, ("cpu_set_upcall: thread has no trapframe"));
	bcopy(td0->td_frame, tf, sizeof(*tf));
	tf->tf_length = sizeof(struct trapframe);
	tf->tf_flags = FRAME_SYSCALL;
	tf->tf_special.ndirty = 0;
	tf->tf_special.bspstore &= ~0x1ffUL;
	tf->tf_scratch.gr8 = 0;
	tf->tf_scratch.gr9 = 1;
	tf->tf_scratch.gr10 = 0;

	pcb = td->td_pcb;
	KASSERT(pcb != NULL, ("cpu_set_upcall: thread has no pcb"));
	bcopy(td0->td_pcb, pcb, sizeof(*pcb));
	pcb->pcb_special.bspstore = td->td_kstack;
	pcb->pcb_special.pfs = 0;
	pcb->pcb_current_pmap = vmspace_pmap(td->td_proc->p_vmspace);
	pcb->pcb_special.sp = (uintptr_t)tf - 16;
	pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
	cpu_set_fork_handler(td, (void (*)(void*))fork_return, td);
}

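/*
 * Point thread td at the KSE upcall function: fetch the entry point and
 * the gp value from the userland function descriptor, build a fresh
 * user frame on the upcall stack, and arrange for the KSE mailbox
 * pointer to appear as the upcall's only argument. For a syscall frame
 * the argument is written straight into the user backing store; for an
 * interrupt frame it is appended to the dirty registers held on the
 * kernel stack.
 */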
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
	struct ia64_fdesc *fd;
	struct trapframe *tf;
	uint64_t ndirty, stack;

	tf = td->td_frame;
	ndirty = tf->tf_special.ndirty + (tf->tf_special.bspstore & 0x1ffUL);

	KASSERT((ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	fd = ku->ku_func;
	stack = (uint64_t)ku->ku_stack.ss_sp;

	bzero(&tf->tf_special, sizeof(tf->tf_special));
	tf->tf_special.iip = fuword(&fd->func);
	tf->tf_special.gp = fuword(&fd->gp);
	tf->tf_special.sp = (stack + ku->ku_stack.ss_size - 16) & ~15;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;

	if (tf->tf_flags & FRAME_SYSCALL) {
		tf->tf_special.cfm = (3UL<<62) | (1UL<<7) | 1UL;
		tf->tf_special.bspstore = stack + 8;
		suword((caddr_t)stack, (uint64_t)ku->ku_mailbox);
	} else {
		tf->tf_special.cfm = (1UL<<63) | (1UL<<7) | 1UL;
		tf->tf_special.bspstore = stack;
		tf->tf_special.ndirty = 8;
		stack = td->td_kstack + ndirty - 8;
		if ((stack & 0x1ff) == 0x1f8) {
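			/*
			 * This word lines up with an RNaT collection
			 * slot: the RSE stores a NaT collection word at
			 * every 64th backing store slot (address bits
			 * 3..8 all ones), so zero it and grow the dirty
			 * area to account for it.
			 */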
			*(uint64_t*)stack = 0;
			tf->tf_special.ndirty += 8;
			stack -= 8;
		}
		*(uint64_t*)stack = (uint64_t)ku->ku_mailbox;
	}
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2 __unused, struct thread *td2,
    int flags)
{
	char *stackp;
	uint64_t ndirty;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: td1 not curthread and not thread0"));

	if ((flags & RFPROC) == 0)
		return;

	/*
	 * Save the preserved registers and the high FP registers in the
	 * PCB if we're the parent (i.e. td1 == curthread) so that we have
	 * a valid PCB. This also causes an RSE flush. We don't have to
	 * do that otherwise, because there wouldn't be anything important
	 * to save.
	 */
	if (td1 == curthread) {
		if (savectx(td1->td_pcb) != 0)
			panic("unexpected return from savectx()");
		ia64_highfp_save(td1);
	}

	/*
	 * Create the child's kernel stack and backing store. We basically
	 * create an image of the parent's stack and backing store and
	 * adjust where necessary.
	 */
	stackp = (char *)(td2->td_kstack + KSTACK_PAGES * PAGE_SIZE);

	stackp -= sizeof(struct pcb);
	td2->td_pcb = (struct pcb *)stackp;
	bcopy(td1->td_pcb, td2->td_pcb, sizeof(struct pcb));

	stackp -= sizeof(struct trapframe);
	td2->td_frame = (struct trapframe *)stackp;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
	td2->td_frame->tf_length = sizeof(struct trapframe);
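	/*
	 * The parent's dirty registers sit at the bottom of its kernel
	 * stack, offset so that they keep the user backing store's
	 * alignment within its 512-byte region (the bspstore & 0x1ff
	 * term); copy them to the same place on the child's stack.
	 */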
	ndirty = td2->td_frame->tf_special.ndirty +
	    (td2->td_frame->tf_special.bspstore & 0x1ffUL);
	bcopy((void*)td1->td_kstack, (void*)td2->td_kstack, ndirty);

	/* Set up the return values as expected by the fork() libc stub. */
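	/*
	 * PSR.is set means the child runs IA-32 code, which follows the
	 * i386 convention: eax (gr8) = 0 and edx (gr10) = 1 mark the
	 * child. Native ia64 code uses ret0 (gr8) = 0, ret1 (gr9) = 1
	 * and a clear error indication in gr10.
	 */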
	if (td2->td_frame->tf_special.psr & IA64_PSR_IS) {
		td2->td_frame->tf_scratch.gr8 = 0;
		td2->td_frame->tf_scratch.gr10 = 1;
	} else {
		td2->td_frame->tf_scratch.gr8 = 0;
		td2->td_frame->tf_scratch.gr9 = 1;
		td2->td_frame->tf_scratch.gr10 = 0;
	}

	td2->td_pcb->pcb_special.bspstore = td2->td_kstack + ndirty;
	td2->td_pcb->pcb_special.pfs = 0;
	td2->td_pcb->pcb_current_pmap = vmspace_pmap(td2->td_proc->p_vmspace);

	td2->td_pcb->pcb_special.sp = (uintptr_t)stackp - 16;
	td2->td_pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
	cpu_set_fork_handler(td2, (void (*)(void*))fork_return, td2);
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	td->td_frame->tf_scratch.gr2 = (u_int64_t)func;
	td->td_frame->tf_scratch.gr3 = (u_int64_t)arg;
}
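
/*
 * A minimal usage sketch (editorial; not part of the original source):
 * this mirrors how the MI kthread code redirects a newly forked thread
 * to a kernel-only function so it never returns to user mode. The names
 * example_kthread_main and example_attach_worker are hypothetical, and
 * the block is guarded so it is never compiled.
 */
#ifdef EXAMPLE_SKETCH
static void
example_kthread_main(void *arg)
{
	/* A kernel thread does its work forever and never exits to user mode. */
	for (;;)
		tsleep(arg, PWAIT, "xmpl", hz);
}

static void
example_attach_worker(struct thread *newtd, void *arg)
{
	/* Have newtd call example_kthread_main(arg) on its first dispatch. */
	cpu_set_fork_handler(newtd, example_kthread_main, arg);
}
#endif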

/*
 * cpu_exit is called as the last action during exit.
 * We discard the thread's high FP state, if it has any.
 */
void
cpu_exit(struct thread *td)
{

	/* Throw away the high FP registers. */
	ia64_highfp_drop(td);
}

void
cpu_sched_exit(struct thread *td)
{
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	int i;

	mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
	SLIST_INIT(&sf_freelist.sf_head);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (sf_bufs == NULL)
		panic("sf_buf_init: failed to allocate sf_bufs");
	for (i = 0; i < nsfbufs; i++)
		SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf_bufs+i, free_list);
	sf_buf_alloc_want = 0;
}

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m)
{
	struct sf_buf *sf;
	int error;

	mtx_lock(&sf_freelist.sf_lock);
	while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
		    "sfbufa", 0);
		sf_buf_alloc_want--;

		/* If we got a signal, don't risk going back to sleep. */
		if (error)
			break;
	}
	if (sf != NULL) {
		SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
		sf->m = m;
		nsfbufsused++;
		nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	}
	mtx_unlock(&sf_freelist.sf_lock);
	return (sf);
}

/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(void *addr, void *args)
{
	struct sf_buf *sf;
	struct vm_page *m;

	sf = args;
	m = sf->m;
	vm_page_lock_queues();
	vm_page_unwire(m, 0);
	/*
	 * Check for the object going away on us. This can happen since we
	 * don't hold a reference to it. If so, we're responsible for freeing
	 * the page.
	 */
	if (m->wire_count == 0 && m->object == NULL)
		vm_page_free(m);
	vm_page_unlock_queues();
	sf->m = NULL;
	mtx_lock(&sf_freelist.sf_lock);
	SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
	nsfbufsused--;
	if (sf_buf_alloc_want > 0)
		wakeup_one(&sf_freelist);
	mtx_unlock(&sf_freelist.sf_lock);
}

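/*
 * A minimal usage sketch (editorial; not part of the original source):
 * wire a page, map it through an sf_buf and copy its contents out, the
 * way a sendfile(2)-style consumer would. example_copy_page is a
 * hypothetical name, the kva computation assumes the ia64 region 7
 * direct map, and the block is guarded so it is never compiled.
 */
#ifdef EXAMPLE_SKETCH
static int
example_copy_page(struct vm_page *m, void *dst)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m);		/* may sleep on the freelist */
	if (sf == NULL)
		return (EINTR);		/* interrupted by a signal */
	bcopy((void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(sf->m)), dst,
	    PAGE_SIZE);
	sf_buf_free(NULL, sf);		/* m_ext-style (addr, args) callback */
	return (0);
}
#endif
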
/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}