/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * 	from: FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.167 2001/07/12
 * $FreeBSD: head/sys/sparc64/sparc64/vm_machdep.c 115971 2003-06-07 18:29:29Z jake $
 */

#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/linker_set.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/cache.h>
#include <machine/cpu.h>
#include <machine/fp.h>
#include <machine/fsr.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/ofw_machdep.h>
#include <machine/ofw_mem.h>
#include <machine/tlb.h>
#include <machine/tstate.h>

PMAP_STATS_VAR(uma_nsmall_alloc);
PMAP_STATS_VAR(uma_nsmall_alloc_oc);
PMAP_STATS_VAR(uma_nsmall_free);

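/*
 * Release the machine-dependent resources of an exiting process: drop
 * the signal trampoline pointer and release this process's reference
 * on the shared user trap table, freeing it on last use.
 */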
void
cpu_exit(struct thread *td)
{
	struct md_utrap *ut;
	struct proc *p;

	p = td->td_proc;
	p->p_md.md_sigtramp = NULL;
	if ((ut = p->p_md.md_utrap) != NULL) {
		ut->ut_refcnt--;
		if (ut->ut_refcnt == 0)
			free(ut, M_SUBPROC);
		p->p_md.md_utrap = NULL;
	}
}

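/*
 * If the last reference to the vmspace is going away, scrub any stale
 * per-CPU references to it so that its context numbers can be recycled
 * safely: clear the pmap's active CPU mask, invalidate its per-CPU
 * context numbers, and drop the cached pc_vmspace pointers.
 */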
void
cpu_sched_exit(struct thread *td)
{
	struct vmspace *vm;
	struct pcpu *pc;
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);

	p = td->td_proc;
	vm = p->p_vmspace;
	if (vm->vm_refcnt > 1)
		return;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc->pc_vmspace == vm) {
			vm->vm_pmap.pm_active &= ~pc->pc_cpumask;
			vm->vm_pmap.pm_context[pc->pc_cpuid] = -1;
			pc->pc_vmspace = NULL;
		}
	}
}

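/*
 * No machine-dependent per-thread state needs to be torn down or reset
 * on sparc64, so these two hooks are empty.
 */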
void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

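/*
 * Lay out the initial machine-dependent thread state: the pcb lives at
 * the top of the kernel stack, aligned to a 64-byte boundary, with the
 * trap frame placed immediately below it.
 */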
void
cpu_thread_setup(struct thread *td)
{
	struct pcb *pcb;

	pcb = (struct pcb *)((td->td_kstack + KSTACK_PAGES * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x3fUL);
	td->td_frame = (struct trapframe *)pcb - 1;
	td->td_pcb = pcb;
}

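/*
 * Initialize a new thread so that it starts out in fork_trampoline().
 * The trampoline loads its arguments from the local registers of the
 * kernel frame built here: fork_return() as the callout, the thread
 * itself as its argument, and the trap frame to return through.  The
 * saved pc is biased by -8 to account for the SPARC return-address
 * convention, which resumes at the return address plus 8.
 */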
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct frame *fr;
	struct pcb *pcb;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	pcb = td->td_pcb;
	tf = td->td_frame;
	fr = (struct frame *)tf - 1;
	fr->fr_local[0] = (u_long)fork_return;
	fr->fr_local[1] = (u_long)td;
	fr->fr_local[2] = (u_long)tf;
	pcb->pcb_pc = (u_long)fork_trampoline - 8;
	pcb->pcb_sp = (u_long)fr - SPOFF;
}

void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct md_utrap *ut;
	struct trapframe *tf;
	struct frame *fp;
	struct pcb *pcb1;
	struct pcb *pcb2;
	vm_offset_t sp;
	int error;
	int i;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: p1 not curproc and not proc0"));

	if ((flags & RFPROC) == 0)
		return;

	p2->p_md.md_sigtramp = td1->td_proc->p_md.md_sigtramp;
	if ((ut = td1->td_proc->p_md.md_utrap) != NULL)
		ut->ut_refcnt++;
	p2->p_md.md_utrap = ut;

	/* The pcb must be aligned on a 64-byte boundary. */
	pcb1 = td1->td_pcb;
	pcb2 = (struct pcb *)((td2->td_kstack + KSTACK_PAGES * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x3fUL);
	td2->td_pcb = pcb2;

	/*
	 * Ensure that p1's pcb is up to date.
	 */
	critical_enter();
	if ((td1->td_frame->tf_fprs & FPRS_FEF) != 0)
		savefpctx(pcb1->pcb_ufp);
	critical_exit();
	/* Make sure the copied windows are spilled. */
	flushw();
	/* Copy the pcb (this will copy the windows saved in the pcb, too). */
	bcopy(pcb1, pcb2, sizeof(*pcb1));

	/*
	 * If we're creating a new user process and sharing the address
	 * space, the parent's topmost frame must be saved in the pcb.  The
	 * child will pop the frame when it returns to user mode, and may
	 * overwrite it with its own data, causing much suffering for the
	 * parent.  We check whether it's already in the pcb, and if not,
	 * copy it in.  It's unlikely that the copyin will fail, but if it
	 * does there's not much we can do.  The parent will likely crash
	 * soon anyway in that case.
	 */
	if ((flags & RFMEM) != 0 && td1 != &thread0) {
		sp = td1->td_frame->tf_sp;
		for (i = 0; i < pcb1->pcb_nsaved; i++) {
			if (pcb1->pcb_rwsp[i] == sp)
				break;
		}
		if (i == pcb1->pcb_nsaved) {
			error = copyin((caddr_t)sp + SPOFF, &pcb1->pcb_rw[i],
			    sizeof(struct rwindow));
			if (error == 0) {
				pcb1->pcb_rwsp[i] = sp;
				pcb1->pcb_nsaved++;
			}
		}
	}

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 */
	tf = (struct trapframe *)pcb2 - 1;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	tf->tf_out[0] = 0;			/* Child returns zero */
	tf->tf_out[1] = 0;
	tf->tf_tstate &= ~TSTATE_XCC_C;		/* success */
	tf->tf_fprs = 0;

	td2->td_frame = tf;
	fp = (struct frame *)tf - 1;
	fp->fr_local[0] = (u_long)fork_return;
	fp->fr_local[1] = (u_long)td2;
	fp->fr_local[2] = (u_long)tf;
	pcb2->pcb_sp = (u_long)fp - SPOFF;
	pcb2->pcb_pc = (u_long)fork_trampoline - 8;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 */
}

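/*
 * Reboot the machine by asking the Open Firmware client interface to
 * "boot" from the device recorded in the "bootpath" property of
 * /chosen, falling back to the firmware default if it cannot be read.
 */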
void
cpu_reset(void)
{
	static char bspec[64] = "";
	phandle_t chosen;
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
		cell_t	bootspec;
	} args = {
		(cell_t)"boot",
		1,
		0,
		(cell_t)bspec
	};

	if ((chosen = OF_finddevice("/chosen")) != 0) {
		if (OF_getprop(chosen, "bootpath", bspec, sizeof(bspec)) == -1)
			bspec[0] = '\0';
		bspec[sizeof(bspec) - 1] = '\0';
	}

	openfirmware_exit(&args);
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct frame *fp;
	struct pcb *pcb;

	pcb = td->td_pcb;
	fp = (struct frame *)(pcb->pcb_sp + SPOFF);
	fp->fr_local[0] = (u_long)func;
	fp->fr_local[1] = (u_long)arg;
}

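/*
 * There are no machine-dependent per-process resources left to reclaim
 * at wait time on sparc64.
 */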
void
cpu_wait(struct proc *p)
{
}

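/*
 * Check whether a physical address is RAM by scanning the memory
 * regions reported by the Open Firmware memory node.
 */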
int
is_physical_memory(vm_paddr_t addr)
{
	struct ofw_mem_region *mr;

	for (mr = sparc64_memreg; mr < sparc64_memreg + sparc64_nmemreg; mr++)
		if (addr >= mr->mr_start && addr < mr->mr_start + mr->mr_size)
			return (1);
	return (0);
}

void
swi_vm(void *v)
{

	/*
	 * Nothing to do here yet - busdma bounce buffers are not yet
	 * implemented.
	 */
}

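/*
 * Small-allocation backend for UMA: hand out single pages through the
 * direct mapped address region instead of mapping them into kva.  The
 * static color counter is passed as the pindex hint so that successive
 * allocations cycle through the data cache colors.
 */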
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	static vm_pindex_t color;
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	PMAP_STATS_INC(uma_nsmall_alloc);

	*flags = UMA_SLAB_PRIV;

	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT;
	else
		pflags = VM_ALLOC_SYSTEM;

	if (wait & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (;;) {
		m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				VM_WAIT;
		} else
			break;
	}

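	/*
	 * If the color the page was last mapped at differs from the
	 * physical color of the direct mapping, flush the page from the
	 * data cache to avoid an illegal alias.
	 */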
	pa = VM_PAGE_TO_PHYS(m);
	if (m->md.color != DCACHE_COLOR(pa)) {
		KASSERT(m->md.colors[0] == 0 && m->md.colors[1] == 0,
		    ("uma_small_alloc: free page still has mappings!"));
		PMAP_STATS_INC(uma_nsmall_alloc_oc);
		m->md.color = DCACHE_COLOR(pa);
		dcache_page_inval(pa);
	}
	va = (void *)TLB_PHYS_TO_DIRECT(pa);
	if ((m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}

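/*
 * Free a page previously handed out by uma_small_alloc().  Convert the
 * direct mapped address back to its vm_page and return it to the page
 * queues.
 */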
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t m;

	PMAP_STATS_INC(uma_nsmall_free);
	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
	vm_page_lock_queues();
	vm_page_free(m);
	vm_page_unlock_queues();
}
394