/*	$OpenBSD: vm_machdep.c,v 1.22 2008/01/03 22:50:04 kettenis Exp $	*/
/*	$NetBSD: vm_machdep.c,v 1.38 2001/06/30 00:02:20 eeh Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *	This product includes software developed by Harvard University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/trap.h>
#include <machine/bus.h>

#include <sparc64/sparc64/cache.h>

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * XXX:  It might be better to round/trunc to a
	 * segment boundary to avoid VAC problems!
	 */
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);
	kva = uvm_km_valloc_wait(kernel_map, len);
	bp->b_data = (caddr_t)(kva + off);

	/*
	 * We have to flush any write-back cache on the
	 * user-space mappings so our new mappings will
	 * have the correct contents.
	 */
	cache_flush(uva, len);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(kernel_map);
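	/*
	 * Walk the buffer one page at a time: resolve each user VA to
	 * its physical page and enter a wired kernel mapping for it, so
	 * the I/O can reach the buffer through kva while it runs.
	 */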
	do {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		/* Now map the page into kernel space. */
		pmap_enter(pmap_kernel(), kva,
			pa /* | PMAP_NC */,
			VM_PROT_READ|VM_PROT_WRITE,
			VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);

		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
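	/*
	 * Tear down the wired kernel mappings, release the KVA range,
	 * and restore the user address vmapbuf() stashed in b_saveaddr.
	 */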

	pmap_remove(pmap_kernel(), kva, kva + len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(kernel_map, kva, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}


/*
 * The offset of the topmost frame in the kernel stack.
 */
#define	TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-CC64FSZ)
#define	STACK_OFFSET	BIAS
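/*
 * SPARC V9 addresses 64-bit stack frames through a bias: the saved
 * stack pointer points BIAS (2047) bytes below the frame itself, and
 * the resulting odd pointer marks the frame as 64-bit to the register
 * window spill/fill handlers.
 */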

#ifdef DEBUG
char cpu_forkname[] = "cpu_fork()";
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *opcb = &p1->p_addr->u_pcb;
	struct pcb *npcb = &p2->p_addr->u_pcb;
	struct trapframe *tf2;
	struct rwindow *rp;
	extern struct proc proc0;

	/*
	 * Save all user registers to p1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * We then copy the whole pcb to p2; when switch() selects p2
	 * to run, it will run at the `proc_trampoline' stub, rather
	 * than returning at the copying code below.
	 *
	 * If process p1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */

#ifdef NOTDEF_DEBUG
	printf("cpu_fork()\n");
#endif
	if (p1 == curproc) {
		write_user_windows();

		/*
		 * We're in the kernel, so we don't really care about
		 * %ccr or %asi.  We do want to duplicate %pstate and %cwp.
		 */
		opcb->pcb_pstate = getpstate();
		opcb->pcb_cwp = getcwp();
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
#ifdef DEBUG
	/* prevent us from having NULL lastcall */
	opcb->lastcall = cpu_forkname;
#else
	opcb->lastcall = NULL;
#endif
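	/* Start the child from a copy of the parent's PCB and FPU state. */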
	bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
	if (p1->p_md.md_fpstate) {
		fpusave_proc(p1, 1);
		p2->p_md.md_fpstate = malloc(sizeof(struct fpstate64),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_md.md_fpstate, p2->p_md.md_fpstate,
		    sizeof(struct fpstate64));
	} else
		p2->p_md.md_fpstate = NULL;

	/*
	 * Set up the kernel stack frame that will carry the child back
	 * out of the kernel.  (The trap frame invariably resides at
	 * the very top of the u. area.)
	 */
	tf2 = p2->p_md.md_tf = (struct trapframe *)
			((long)npcb + USPACE - sizeof(*tf2));

	/* Copy parent's trapframe */
	*tf2 = *(struct trapframe *)((long)opcb + USPACE - sizeof(*tf2));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf2->tf_out[6] = (u_int64_t)(u_long)stack + stacksize;

	/* Duplicate efforts of syscall(), but slightly differently */
	if (tf2->tf_global[1] & SYSCALL_G2RFLAG) {
		/* jmp %g2 (or %g7, deprecated) on success */
		tf2->tf_npc = tf2->tf_global[2];
	} else {
		/*
		 * old system call convention: clear C on success
		 * note: proc_trampoline() sets a fresh psr when
		 * returning to user mode.
		 */
		/*tf2->tf_psr &= ~PSR_C;   -* success */
	}
	/* Set the child's return values: %o0 = 0, %o1 = 1 (child side). */
	tf2->tf_out[0] = 0;
	tf2->tf_out[1] = 1;

	/* Skip trap instruction. */
	tf2->tf_pc = tf2->tf_npc;
	tf2->tf_npc += 4;

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_long)npcb + TOPFRAMEOFF);
	*rp = *(struct rwindow *)((u_long)opcb + TOPFRAMEOFF);
	rp->rw_local[0] = (long)func;		/* Function to call */
	rp->rw_local[1] = (long)arg;		/* and its argument */

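	/*
	 * cpu_switch() resumes a process by returning through pcb_pc the
	 * way a call returns (target + 8), so point it 8 bytes before
	 * proc_trampoline; pcb_sp carries the V9 stack bias.
	 */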
	npcb->pcb_pc = (long)proc_trampoline - 8;
	npcb->pcb_sp = (long)rp - STACK_OFFSET;

	/* Need to create a %tstate if we're forking from proc0 */
	if (p1 == &proc0)
		tf2->tf_tstate = (ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT) |
			((PSTATE_USER)<<TSTATE_PSTATE_SHIFT);
	else
		/* clear condition codes and disable FPU */
		tf2->tf_tstate &=
		    ~((PSTATE_PEF<<TSTATE_PSTATE_SHIFT)|TSTATE_CCR);

#ifdef NOTDEF_DEBUG
	printf("cpu_fork: Copying over trapframe: otf=%p ntf=%p sp=%p opcb=%p npcb=%p\n",
	       (struct trapframe *)((char *)opcb + USPACE - sizeof(*tf2)), tf2, rp, opcb, npcb);
	printf("cpu_fork: tstate=%lx pc=%lx npc=%lx rsp=%lx\n",
	       (long)tf2->tf_tstate, (long)tf2->tf_pc, (long)tf2->tf_npc,
	       (long)(tf2->tf_out[6]));
	Debugger();
#endif
}

/*
 * These are the "function" entry points in locore.s to handle IPIs.
 */
void	ipi_save_fpstate(void);
void	ipi_drop_fpstate(void);

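/*
 * Save or discard the FPU state owned by ci_fpproc on this CPU and
 * release the FPU.
 */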
void
fpusave_cpu(struct cpu_info *ci, int save)
{
	struct proc *p;

	KDASSERT(ci == curcpu());

	p = ci->ci_fpproc;
	if (p == NULL)
		return;

	if (save)
		savefpstate(p->p_md.md_fpstate);
	else
		clearfpstate();

	ci->ci_fpproc = NULL;
}

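/*
 * Flush process p's FPU state wherever it currently lives.  On MP
 * systems the state may sit on another CPU, which is then asked via
 * an IPI to save or drop it.
 */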
void
fpusave_proc(struct proc *p, int save)
{
	struct cpu_info *ci = curcpu();

#ifdef MULTIPROCESSOR
	if (p == ci->ci_fpproc) {
		u_int64_t s = intr_disable();
		fpusave_cpu(ci, save);
		intr_restore(s);
		return;
	}

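	/*
	 * The state lives on some other CPU: find it, send it an IPI,
	 * and spin until that CPU has given up ownership.
	 */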
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		int spincount = 0;

		if (ci == curcpu())
			continue;
		if (ci->ci_fpproc != p)
			continue;
		sparc64_send_ipi(ci->ci_upaid,
		    save ? ipi_save_fpstate : ipi_drop_fpstate, (vaddr_t)p, 0);
		while (ci->ci_fpproc == p) {
			spincount++;
			if (spincount > 10000000) {
				panic("ipi_save_fpstate didn't");
			}
			sparc_membar(Sync);
		}
		break;
	}
#else
	if (p == ci->ci_fpproc)
		fpusave_cpu(ci, save);
#endif
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call sched_exit() with the old proc
 * as an argument.  sched_exit() schedules the old vmspace and stack
 * to be freed, then selects a new process to run.
 */
void
cpu_exit(struct proc *p)
{
	if (p->p_md.md_fpstate != NULL) {
		fpusave_proc(p, 0);
		free(p->p_md.md_fpstate, M_SUBPROC);
	}

	pmap_deactivate(p);
	sched_exit(p);
}

/*
 * cpu_coredump is called to write a core dump header.
 * (should this be defined elsewhere?  machdep.c?)
 */
int
cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
    struct core *chdr)
{
	int error;
	struct md_coredump md_core;
	struct coreseg cseg;

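	/*
	 * Dump layout: the core header is followed by one CPU segment
	 * header and then the machine-dependent data (trapframe, window
	 * cookie and FPU state).
	 */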
	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(md_core);

	md_core.md_tf = *p->p_md.md_tf;
	md_core.md_wcookie = p->p_addr->u_pcb.pcb_wcookie;
	if (p->p_md.md_fpstate) {
		fpusave_proc(p, 1);
		md_core.md_fpstate = *p->p_md.md_fpstate;
	} else
		bzero((caddr_t)&md_core.md_fpstate,
		      sizeof(md_core.md_fpstate));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (!error)
		chdr->c_nseg++;

	return error;
}