/* $NetBSD: vm_machdep.c,v 1.110 2012/01/28 16:30:23 skrll Exp $ */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.110 2012/01/28 16:30:23 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/alpha.h>
#include <machine/pmap.h>
#include <machine/reg.h>

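/*
 * Machine-dependent hooks run when an LWP exits.  The alpha port
 * keeps no per-LWP MD state that needs tearing down, so both hooks
 * are empty.
 */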
void
cpu_lwp_free(struct lwp *l, int proc)
{
	(void) l;
}

void
cpu_lwp_free2(struct lwp *l)
{
	(void) l;
}

/*
 * Finish a fork operation, with thread l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child thread to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the forking thread (the parent); if l1 == &lwp0, we are
 * creating a kernel thread, and the return path and argument are
 * specified with `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	extern void lwp_trampoline(void);

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	/*
	 * The child starts with the parent's trapframe pointer and
	 * inherits only the FP-related MD flags (FP-used and the
	 * software FP_C control word); no AST is pending yet.
	 */
	l2->l_md.md_tf = l1->l_md.md_tf;
	l2->l_md.md_flags = l1->l_md.md_flags & (MDLWP_FPUSED | MDLWP_FP_C);
	l2->l_md.md_astpending = 0;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	l2->l_md.md_pcbpaddr = (void *)vtophys((vaddr_t)pcb2);

	/*
	 * Copy the pcb and user stack pointer from lwp l1 to l2.
	 * If specified, give the child a different stack.
	 * Floating point state from the FP chip has already been saved.
	 */
	*pcb2 = *pcb1;
	if (stack != NULL)
		pcb2->pcb_hw.apcb_usp = (u_long)stack + stacksize;
	else
		pcb2->pcb_hw.apcb_usp = alpha_pal_rdusp();

	/*
	 * Arrange for the child to start out in lwp_trampoline()
	 * when it is first scheduled to run.
	 */
#ifdef DIAGNOSTIC
	/*
	 * If l1 != curlwp && l1 == &lwp0, we are creating a kernel
	 * thread.
	 */
	if (l1 != curlwp && l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *l2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		l2tf = l2->l_md.md_tf = (struct trapframe *)
		    (uvm_lwp_getuarea(l2) + USPACE - sizeof(struct trapframe));
		memcpy(l2->l_md.md_tf, l1->l_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		l2tf->tf_regs[FRAME_V0] = l1->l_proc->p_pid; /* parent's pid */
		l2tf->tf_regs[FRAME_A3] = 0;		/* no error */
		l2tf->tf_regs[FRAME_A4] = 1;		/* is child */

		pcb2->pcb_hw.apcb_ksp =
		    (uint64_t)l2->l_md.md_tf;
		pcb2->pcb_context[0] =
		    (uint64_t)func;			/* s0: pc */
		pcb2->pcb_context[1] =
		    (uint64_t)exception_return;		/* s1: ra */
		pcb2->pcb_context[2] =
		    (uint64_t)arg;			/* s2: arg */
		pcb2->pcb_context[3] =
		    (uint64_t)l2;			/* s3: lwp */
		pcb2->pcb_context[7] =
		    (uint64_t)lwp_trampoline;		/* ra: assembly magic */
	}
}
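
/*
 * When the child runs for the first time, the context switch code
 * restores the pcb_context[] registers set up above, so control
 * arrives in lwp_trampoline() via the saved ra.  Roughly, the
 * trampoline then calls func(arg) -- child_return() for a normal
 * fork -- and finally jumps to exception_return() to enter user
 * mode through the trapframe built above.
 */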

/*
 * Set an existing LWP to start at the given kernel entry point with
 * the given argument, leaving its trapframe untouched.
 */
void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = lwp_getpcb(l);
	extern void setfunc_trampoline(void);

	pcb->pcb_hw.apcb_ksp =
	    (uint64_t)l->l_md.md_tf;
	pcb->pcb_context[0] =
	    (uint64_t)func;			/* s0: pc */
	pcb->pcb_context[1] =
	    (uint64_t)exception_return;		/* s1: ra */
	pcb->pcb_context[2] =
	    (uint64_t)arg;			/* s2: arg */
	pcb->pcb_context[7] =
	    (uint64_t)setfunc_trampoline;	/* ra: assembly magic */
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	len = atop(len);
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
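
/*
 * vmapbuf() and vunmapbuf() are used in pairs, typically from
 * physio(9): the caller first wires the user pages with
 * uvm_vslock(), the buffer is remapped into kernel VA space here
 * for the duration of the transfer, and both steps are undone when
 * the I/O completes.
 */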

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

#ifdef __HAVE_CPU_UAREA_ROUTINES
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
	if (error) {
		/*
		 * For user LWPs the caller can recover from an
		 * allocation failure; for system LWPs it cannot,
		 * so failure is fatal.
		 */
		if (!system)
			return NULL;
		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */

	return (void *)PMAP_MAP_POOLPAGE(pa);
}
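
/*
 * The address returned above is a direct-mapped (K0SEG) virtual
 * address, so the uarea consumes no kernel-map virtual address
 * space and needs no page-table entries of its own.
 */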

/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *vva)
{
	vaddr_t va = (vaddr_t) vva;
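	/*
	 * Direct-mapped (K0SEG) addresses lie outside the managed
	 * kernel VA range, so an address inside that range cannot
	 * have come from cpu_uarea_alloc() and is not ours to free.
	 */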
	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
		return false;

	/*
	 * Since the pages are physically contiguous, the vm_page structure
	 * will be as well.
	 */
	struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
	KASSERT(pg != NULL);
	for (size_t i = 0; i < UPAGES; i++, pg++) {
		uvm_pagefree(pg);
	}
	return true;
}
#endif /* __HAVE_CPU_UAREA_ROUTINES */