/*	$NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $	*/

/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $");

#include "opt_mtrr.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/core.h>
#include <sys/exec.h>
#include <sys/ptrace.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/gdt.h>
#include <machine/reg.h>
#include <machine/specialreg.h>
#ifdef MTRR
#include <machine/mtrr.h>
#endif

#ifdef __x86_64__
#include <machine/fpu.h>
#else
#include "npx.h"
#if NNPX > 0
#define fpusave_lwp(x, y)	npxsave_lwp(x, y)
#else
#define fpusave_lwp(x, y)
#endif
#endif

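/*
 * cpu_proc_fork: finish machine-dependent process creation.  Copy
 * the MD flags from the parent process (p1) to the child (p2).
 */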
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

	p2->p_md.md_flags = p1->p_md.md_flags;
}

/*
 * cpu_lwp_fork: finish creating a new LWP (l2).
 *
 * The first LWP (l1) is the LWP being forked from (the parent).  If it
 * is &lwp0, then we are creating a kthread, whose return path and
 * argument are specified with `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user
 * stack pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	vaddr_t uv;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	/*
	 * If the parent LWP was using the FPU, save the FPU h/w state
	 * to its PCB so that we can copy it.
	 */
	if (pcb1->pcb_fpcpu != NULL) {
		fpusave_lwp(l1, true);
	}

	/*
	 * Sync the PCB before we copy it.
	 */
	if (l1 == curlwp) {
		KASSERT(pcb1 == curpcb);
		savectx(pcb1);
	} else {
		KASSERT(l1 == &lwp0);
	}

	/* Copy the PCB from parent. */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

#if defined(XEN)
	pcb2->pcb_iopl = SEL_KPL;
#endif

	/*
	 * Set the kernel stack address (derived from the uarea address)
	 * and trapframe address for the child.
	 *
	 * Rig the kernel stack so that it starts out in lwp_trampoline()
	 * and calls child_return() with l2 as an argument.  This causes
	 * the newly-created child process to go directly to user level
	 * with a return value of 0 from fork(), while the parent process
	 * returns normally.
	 */
	uv = uvm_lwp_getuarea(l2);

#ifdef __x86_64__
	pcb2->pcb_rsp0 = (uv + KSTACK_SIZE - 16) & ~0xf;
	tf = (struct trapframe *)pcb2->pcb_rsp0 - 1;
#else
	pcb2->pcb_esp0 = (uv + KSTACK_SIZE - 16);
	tf = (struct trapframe *)pcb2->pcb_esp0 - 1;

	pcb2->pcb_iomap = NULL;
#endif
	l2->l_md.md_regs = tf;

	/*
	 * Copy the trapframe from the parent, so that the return to
	 * userspace will be to the right address, with the correct
	 * registers.
	 */
	memcpy(tf, l1->l_md.md_regs, sizeof(struct trapframe));

	/* The child LWP might get aston() before returning to userspace. */
	tf->tf_trapno = T_ASTFLT;

#if 0 /* DIAGNOSTIC */
	/* Set a red zone in the kernel stack after the uarea. */
	pmap_kremove(uv, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif

	/* If specified, set a different user stack for the child. */
	if (stack != NULL) {
#ifdef __x86_64__
		tf->tf_rsp = (uint64_t)stack + stacksize;
#else
		tf->tf_esp = (uint32_t)stack + stacksize;
#endif
	}

	l2->l_md.md_flags = l1->l_md.md_flags;
	l2->l_md.md_astpending = 0;

	cpu_setfunc(l2, func, arg);
}

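/*
 * cpu_setfunc: arrange for the LWP to begin execution in func(arg).
 *
 * A switchframe is built immediately below the trapframe on the
 * kernel stack; when the LWP is first switched to, cpu_switchto()
 * "returns" through lwp_trampoline() (or child_trampoline() for a
 * native fork on amd64), which calls func with arg.
 */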
void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct trapframe *tf = l->l_md.md_regs;
	struct switchframe *sf = (struct switchframe *)tf - 1;

#ifdef __x86_64__
	sf->sf_r12 = (uint64_t)func;
	sf->sf_r13 = (uint64_t)arg;
	if (func == child_return && !(l->l_proc->p_flag & PK_32))
		sf->sf_rip = (uint64_t)child_trampoline;
	else
		sf->sf_rip = (uint64_t)lwp_trampoline;
	pcb->pcb_rsp = (uint64_t)sf;
	pcb->pcb_rbp = (uint64_t)l;
#else
	sf->sf_esi = (int)func;
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)lwp_trampoline;
	pcb->pcb_esp = (int)sf;
	pcb->pcb_ebp = (int)l;
#endif
}

/*
 * cpu_lwp_free is called from exit() to let machine-dependent
 * code free machine-dependent resources.  Note that this routine
 * must not block.
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{
	struct pcb *pcb = lwp_getpcb(l);

	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL) {
		fpusave_lwp(l, false);
	}

#ifdef MTRR
	if (proc && l->l_proc->p_md.md_flags & MDP_USEDMTRR)
		mtrr_clean(l->l_proc);
#endif
}
269
270/*
271 * cpu_lwp_free2 is called when an LWP is being reaped.
272 * This routine may block.
273 */
274void
275cpu_lwp_free2(struct lwp *l)
276{
277
278	KASSERT(l->l_md.md_gc_ptp == NULL);
279	KASSERT(l->l_md.md_gc_pmap == NULL);
280}

/*
 * Convert a kernel virtual address to a physical address.  The VA
 * must be mapped; we assert that pmap_extract() succeeds.
 */
paddr_t
kvtop(void *addr)
{
	paddr_t pa;
	bool ret;

	ret = pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa);
	KASSERT(ret == true);
	return pa;
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	KASSERT((bp->b_flags & B_PHYS) != 0);

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * succeed.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush the TLB since we expect nothing to be mapped
	 * where we just allocated (the TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
339
340/*
341 * Unmap a previously-mapped user I/O request.
342 */
343void
344vunmapbuf(struct buf *bp, vsize_t len)
345{
346	vaddr_t addr, off;
347
348	KASSERT((bp->b_flags & B_PHYS) != 0);
349
350	addr = trunc_page((vaddr_t)bp->b_data);
351	off = (vaddr_t)bp->b_data - addr;
352	len = round_page(off + len);
353	pmap_kremove(addr, len);
354	pmap_update(pmap_kernel());
355	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
356	bp->b_data = bp->b_saveaddr;
357	bp->b_saveaddr = 0;
358}
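
/*
 * Usage sketch (hypothetical caller; in practice physio() does this):
 * a driver performing raw I/O on a user buffer brackets the transfer
 * with vmapbuf()/vunmapbuf():
 *
 *	bp->b_data = uio->uio_iov->iov_base;	(user address, B_PHYS set)
 *	vmapbuf(bp, todo);			(bp->b_data now a kernel VA)
 *	...start the transfer on bp->b_data...
 *	vunmapbuf(bp, todo);			(restores bp->b_data)
 */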

#ifdef __HAVE_CPU_UAREA_ROUTINES
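/*
 * cpu_uarea_alloc: allocate a uarea from physically contiguous memory
 * so that it can be accessed through the direct map, avoiding a
 * dedicated kernel virtual mapping.  Returns NULL if no contiguous
 * memory is available; the caller then falls back to the generic
 * uarea allocator.
 */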
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
	if (error) {
		return NULL;
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */

	return (void *)PMAP_MAP_POOLPAGE(pa);
}

/*
 * Return true if we freed the uarea here, false if we did not (so the
 * caller must free it).  An address inside the kernel VA range was not
 * direct-mapped by cpu_uarea_alloc(), so we leave it to the generic
 * code to release.
 */
bool
cpu_uarea_free(void *vva)
{
	vaddr_t va = (vaddr_t) vva;

	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
		return false;
	}

	/*
	 * Since the pages are physically contiguous, the vm_page structures
	 * will be as well.
	 */
	struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
	KASSERT(pg != NULL);
	for (size_t i = 0; i < UPAGES; i++, pg++) {
		uvm_pagefree(pg);
	}
	return true;
}
#endif /* __HAVE_CPU_UAREA_ROUTINES */