vm_machdep.c revision 149915
1/*-
2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department, and William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
40 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41 * $FreeBSD: head/sys/ia64/ia64/vm_machdep.c 149915 2005-09-09 19:18:36Z marcel $
42 */
43/*-
44 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
45 * All rights reserved.
46 *
47 * Author: Chris G. Demetriou
48 *
49 * Permission to use, copy, modify and distribute this software and
50 * its documentation is hereby granted, provided that both the copyright
51 * notice and this permission notice appear in all copies of the
52 * software, derivative works or modified versions, and any portions
53 * thereof, and that both notices appear in supporting documentation.
54 *
55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58 *
59 * Carnegie Mellon requests users of this software to return to
60 *
61 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62 *  School of Computer Science
63 *  Carnegie Mellon University
64 *  Pittsburgh PA 15213-3890
65 *
66 * any improvements or extensions that they make and grant Carnegie the
67 * rights to redistribute these changes.
68 */
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/proc.h>
73#include <sys/malloc.h>
74#include <sys/bio.h>
75#include <sys/buf.h>
76#include <sys/vnode.h>
77#include <sys/vmmeter.h>
78#include <sys/kernel.h>
79#include <sys/mbuf.h>
80#include <sys/sf_buf.h>
81#include <sys/sysctl.h>
82#include <sys/unistd.h>
83
84#include <machine/clock.h>
85#include <machine/cpu.h>
86#include <machine/fpu.h>
87#include <machine/md_var.h>
88#include <machine/pcb.h>
89
90#include <vm/vm.h>
91#include <vm/vm_param.h>
92#include <sys/lock.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_page.h>
95#include <vm/vm_map.h>
96#include <vm/vm_extern.h>
97
98#include <i386/include/psl.h>
99
/*
 * MD thread-exit hook: nothing to do on ia64 (high FP state is dropped
 * in cpu_exit() instead; see the XXX note there).
 */
void
cpu_thread_exit(struct thread *td)
{
}
104
/*
 * MD hook to clean per-thread state for reuse: no machine-dependent
 * resources to release on ia64.
 */
void
cpu_thread_clean(struct thread *td)
{
}
109
/*
 * Lay out the machine-dependent structures on a new thread's kernel
 * stack: the PCB at the very top (highest address), with the trapframe
 * immediately below it.  Also initializes the per-thread high-FP lock.
 */
void
cpu_thread_setup(struct thread *td)
{
	intptr_t sp;

	/* Start from the top of the kernel stack. */
	sp = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	/* Carve out the PCB first... */
	sp -= sizeof(struct pcb);
	td->td_pcb = (struct pcb *)sp;
	/* ...then the trapframe directly below it. */
	sp -= sizeof(struct trapframe);
	td->td_frame = (struct trapframe *)sp;
	td->td_frame->tf_length = sizeof(struct trapframe);
	/* Spin mutex serializing access to this thread's high FP state. */
	mtx_init(&td->td_md.md_highfp_mtx, "High FP lock", NULL, MTX_SPIN);
}
123
/*
 * MD hook run when a thread's kernel stack is swapped back in:
 * nothing to restore on ia64.
 */
void
cpu_thread_swapin(struct thread *td)
{
}
128
/*
 * MD hook run before a thread's kernel stack is swapped out.  Save the
 * high FP registers so that state is not lost while the thread is
 * swapped (presumably the CPU's high FP partition can't be relied on
 * across the swap — inferred from the save; confirm in ia64_highfp_save()).
 */
void
cpu_thread_swapout(struct thread *td)
{

	ia64_highfp_save(td);
}
135
136void
137cpu_set_upcall(struct thread *td, struct thread *td0)
138{
139	struct pcb *pcb;
140	struct trapframe *tf;
141
142	tf = td->td_frame;
143	KASSERT(tf != NULL, ("foo"));
144	bcopy(td0->td_frame, tf, sizeof(*tf));
145	tf->tf_length = sizeof(struct trapframe);
146	tf->tf_flags = FRAME_SYSCALL;
147	tf->tf_special.ndirty = 0;
148	tf->tf_special.bspstore &= ~0x1ffUL;
149	tf->tf_scratch.gr8 = 0;
150	tf->tf_scratch.gr9 = 1;
151	tf->tf_scratch.gr10 = 0;
152
153	pcb = td->td_pcb;
154	KASSERT(pcb != NULL, ("foo"));
155	bcopy(td0->td_pcb, pcb, sizeof(*pcb));
156	pcb->pcb_special.bspstore = td->td_kstack;
157	pcb->pcb_special.pfs = 0;
158	pcb->pcb_current_pmap = vmspace_pmap(td->td_proc->p_vmspace);
159	pcb->pcb_special.sp = (uintptr_t)tf - 16;
160	pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
161	cpu_set_fork_handler(td, (void (*)(void*))fork_return, td);
162
163	/* Setup to release sched_lock in fork_exit(). */
164	td->td_md.md_spinlock_count = 1;
165	td->td_md.md_saved_intr = 1;
166}
167
/*
 * Rewrite thread td's trapframe so that it resumes in user mode at the
 * given entry point with the given argument, running on the supplied
 * user stack.  Used to launch upcall/KSE threads.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct ia64_fdesc *fd;
	struct trapframe *tf;
	uint64_t ndirty, sp;

	tf = td->td_frame;
	/*
	 * Total bytes of dirty stacked registers: the counted dirty area
	 * plus the partial 512-byte line encoded in bspstore's low bits.
	 */
	ndirty = tf->tf_special.ndirty + (tf->tf_special.bspstore & 0x1ffUL);

	KASSERT((ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	/*
	 * On ia64 a function pointer is a function descriptor; fetch the
	 * real entry address and global pointer from user memory.
	 */
	fd = (struct ia64_fdesc *)entry;
	sp = (uint64_t)stack->ss_sp;

	bzero(&tf->tf_special, sizeof(tf->tf_special));
	tf->tf_special.iip = fuword(&fd->func);
	tf->tf_special.gp = fuword(&fd->gp);
	/* Top of the user stack, 16-byte aligned with 16 bytes of scratch. */
	tf->tf_special.sp = (sp + stack->ss_size - 16) & ~15;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	/* User-mode PSR: interrupts, translation on, high FP disabled. */
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;

	if (tf->tf_flags & FRAME_SYSCALL) {
		/*
		 * Syscall frame: the argument register lives on the user
		 * backing store, so write it there directly.
		 */
		tf->tf_special.cfm = (3UL<<62) | (1UL<<7) | 1UL;
		tf->tf_special.bspstore = sp + 8;
		suword((caddr_t)sp, (uint64_t)arg);
	} else {
		/*
		 * Interrupt frame: stage the argument in the kernel's copy
		 * of the dirty registers at the base of the kstack instead.
		 */
		tf->tf_special.cfm = (1UL<<63) | (1UL<<7) | 1UL;
		tf->tf_special.bspstore = sp;
		tf->tf_special.ndirty = 8;
		sp = td->td_kstack + ndirty - 8;
		/*
		 * Offset 0x1f8 within a 512-byte line is presumably the
		 * RSE NaT collection slot — skip over it if the argument
		 * would land there.  TODO(review): confirm against the
		 * Itanium RSE specification.
		 */
		if ((sp & 0x1ff) == 0x1f8) {
			*(uint64_t*)sp = 0;
			tf->tf_special.ndirty += 8;
			sp -= 8;
		}
		*(uint64_t*)sp = (uint64_t)arg;
	}
}
212
213int
214cpu_set_user_tls(struct thread *td, void *tls_base)
215{
216	td->td_frame->tf_special.tp = (unsigned long)tls_base;
217	return (0);
218}
219
220/*
221 * Finish a fork operation, with process p2 nearly set up.
222 * Copy and update the pcb, set up the stack so that the child
223 * ready to run and return to user mode.
224 */
void
cpu_fork(struct thread *td1, struct proc *p2 __unused, struct thread *td2,
    int flags)
{
	char *stackp;
	uint64_t ndirty;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: td1 not curthread and not thread0"));

	/* Nothing to do unless an actual new process is being created. */
	if ((flags & RFPROC) == 0)
		return;

	/*
	 * Save the preserved registers and the high FP registers in the
	 * PCB if we're the parent (ie td1 == curthread) so that we have
	 * a valid PCB. This also causes a RSE flush. We don't have to
	 * do that otherwise, because there wouldn't be anything important
	 * to save.
	 */
	if (td1 == curthread) {
		if (savectx(td1->td_pcb) != 0)
			panic("unexpected return from savectx()");
		ia64_highfp_save(td1);
	}

	/*
	 * create the child's kernel stack and backing store. We basicly
	 * create an image of the parent's stack and backing store and
	 * adjust where necessary.
	 */
	stackp = (char *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE);

	/* PCB at the very top of the child's kernel stack... */
	stackp -= sizeof(struct pcb);
	td2->td_pcb = (struct pcb *)stackp;
	bcopy(td1->td_pcb, td2->td_pcb, sizeof(struct pcb));

	/* ...with the trapframe immediately below it. */
	stackp -= sizeof(struct trapframe);
	td2->td_frame = (struct trapframe *)stackp;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
	td2->td_frame->tf_length = sizeof(struct trapframe);
	/*
	 * Copy the parent's dirty stacked registers (counted dirty bytes
	 * plus the partial line in bspstore's low bits) to the child's
	 * backing store at the base of its kstack.
	 */
	ndirty = td2->td_frame->tf_special.ndirty +
	    (td2->td_frame->tf_special.bspstore & 0x1ffUL);
	bcopy((void*)td1->td_kstack, (void*)td2->td_kstack, ndirty);

	/* Set-up the return values as expected by the fork() libc stub. */
	if (td2->td_frame->tf_special.psr & IA64_PSR_IS) {
		/*
		 * PSR.is set: presumably an IA-32 mode process with a
		 * different return convention — confirm against the
		 * ia32 syscall path.
		 */
		td2->td_frame->tf_scratch.gr8 = 0;
		td2->td_frame->tf_scratch.gr10 = 1;
	} else {
		/* Native: gr8 = retval 0, gr9 = child flag, gr10 = no error. */
		td2->td_frame->tf_scratch.gr8 = 0;
		td2->td_frame->tf_scratch.gr9 = 1;
		td2->td_frame->tf_scratch.gr10 = 0;
	}

	/* Child resumes with its backing store just past the copied frames. */
	td2->td_pcb->pcb_special.bspstore = td2->td_kstack + ndirty;
	td2->td_pcb->pcb_special.pfs = 0;
	td2->td_pcb->pcb_current_pmap = vmspace_pmap(td2->td_proc->p_vmspace);

	/* 16 bytes of scratch space below the trapframe, then run
	 * fork_trampoline() on first dispatch. */
	td2->td_pcb->pcb_special.sp = (uintptr_t)stackp - 16;
	td2->td_pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
	cpu_set_fork_handler(td2, (void (*)(void*))fork_return, td2);

	/* Setup to release sched_lock in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_intr = 1;
}
292
293/*
294 * Intercept the return address from a freshly forked process that has NOT
295 * been scheduled yet.
296 *
297 * This is needed to make kernel threads stay in kernel mode.
298 */
299void
300cpu_set_fork_handler(td, func, arg)
301	struct thread *td;
302	void (*func)(void *);
303	void *arg;
304{
305	td->td_frame->tf_scratch.gr2 = (u_int64_t)func;
306	td->td_frame->tf_scratch.gr3 = (u_int64_t)arg;
307}
308
309/*
310 * cpu_exit is called as the last action during exit.
311 * We drop the fp state (if we have it) and switch to a live one.
312 */
void
cpu_exit(struct thread *td)
{

	/* XXX: Should this be in cpu_thread_exit() instead? */
	/* Throw away the high FP registers. */
	ia64_highfp_drop(td);
}
321
322/*
323 * Allocate an sf_buf for the given vm_page.  On this machine, however, there
324 * is no sf_buf object.  Instead, an opaque pointer to the given vm_page is
325 * returned.
326 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int pri)
{
	struct sf_buf *sf;

	/*
	 * ia64 needs no mapping window, so the page pointer itself is
	 * handed back as the opaque sf_buf handle.
	 */
	sf = (struct sf_buf *)m;
	return (sf);
}
333
334/*
335 * Free the sf_buf.  In fact, do nothing because there are no resources
336 * associated with the sf_buf.
337 */
void
sf_buf_free(struct sf_buf *sf)
{
	/* Nothing to release: sf is just the vm_page pointer in disguise. */
}
342
343/*
344 * Software interrupt handler for queued VM system processing.
345 */
void
swi_vm(void *dummy)
{
	/*
	 * Busdma software-interrupt processing is not hooked up on this
	 * platform; the code below is kept disabled as a template.
	 */
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}
354