vm_machdep.c revision 256792
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/powerpc/powerpc/vm_machdep.c 256792 2013-10-20 16:14:03Z nwhitehorn $
 */
/*-
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

/*
 * On systems without a direct mapped region (e.g. PPC64),
 * we use the same code as the Book E implementation. Since
 * this has to be detected at runtime, define the sf_buf machinery
 * here unconditionally and simply leave it unused on systems that
 * do have a direct map.
 */
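/*
 * The runtime check is the hw_direct_map flag: when it is set, the sf_buf
 * pool below is never initialized and a vm_page pointer itself serves as
 * the sf_buf handle.
 */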

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/* A hash table of active sendfile(2) buffers */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)
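/* SF_BUF_HASH() indexes sf_buf_active by the page's offset within vm_page_array. */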

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

#ifdef __powerpc64__
extern uintptr_t tocbase;
#endif


/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the kernel stack so that the
 * child is ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct	trapframe *tf;
	struct	callframe *cf;
	struct	pcb *pcb;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: p1 not curproc and not proc0"));
	CTR3(KTR_PROC, "cpu_fork: called td1=%p p2=%p flags=%x",
	    td1, p2, flags);

	if ((flags & RFPROC) == 0)
		return;

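	/*
	 * Lay out the new thread's kernel stack: the pcb sits at the top
	 * (aligned downward), with the trap frame and the initial call
	 * frame placed immediately below it.
	 */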
	pcb = (struct pcb *)((td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL);
	td2->td_pcb = pcb;

	/* Copy the pcb */
	bcopy(td1->td_pcb, pcb, sizeof(struct pcb));

	/*
	 * Create a fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 */
	tf = (struct trapframe *)pcb - 1;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	/* Set up trap frame. */
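	/*
	 * The child returns 0 from fork(): both return-value registers
	 * (fixreg[FIRSTARG] and fixreg[FIRSTARG + 1]) are cleared, and
	 * CR0[SO] (bit 0x10000000) is cleared so the syscall is reported
	 * as successful.
	 */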
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;

	td2->td_frame = tf;

	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
	#ifdef __powerpc64__
	cf->cf_toc = tocbase;
	#endif
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td2;
	cf->cf_arg1 = (register_t)tf;

	pcb->pcb_sp = (register_t)cf;
	KASSERT(pcb->pcb_sp % 16 == 0, ("stack misaligned"));
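	/*
	 * On powerpc64 the 64-bit ELF ABI used here represents a function
	 * pointer such as fork_trampoline as a function descriptor whose
	 * first two words are the entry address and the TOC pointer, which
	 * is why both words are loaded into the pcb below.
	 */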
	#ifdef __powerpc64__
	pcb->pcb_lr = ((register_t *)fork_trampoline)[0];
	pcb->pcb_toc = ((register_t *)fork_trampoline)[1];
	#else
	pcb->pcb_lr = (register_t)fork_trampoline;
	#endif
	#ifdef AIM
	pcb->pcb_cpu.aim.usr_vsid = 0;
	#endif

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_msr = PSL_KERNSET;

	/*
	 * Now cpu_switch() can schedule the new process.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
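 *
 * For example, kthread_add(9) and kproc_create(9) use this so that a newly
 * forked kernel thread begins execution in the supplied function rather
 * than returning to user mode.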
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct callframe *cf;

	CTR4(KTR_PROC, "%s called with td=%p func=%p arg=%p",
	    __func__, td, func, arg);

	cf = (struct callframe *)td->td_pcb->pcb_sp;

	cf->cf_func = (register_t)func;
	cf->cf_arg0 = (register_t)arg;
}

void
cpu_exit(struct thread *td)
{

}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast", if you prefer :-)).
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	/* Don't bother on systems with a direct map */
	if (hw_direct_map)
		return;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);

	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist. Will block if none are available.
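 *
 * A typical consumer follows the sf_buf(9) pattern (sketch only):
 *
 *	sf = sf_buf_alloc(m, 0);
 *	va = sf_buf_kva(sf);
 *	... use the mapping at va ...
 *	sf_buf_free(sf);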
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	if (hw_direct_map) {
		/* Shortcut the direct mapped case */
		return ((struct sf_buf *)m);
	}

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}

	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;

		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}

	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);

	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_qenter(sf->kva, &sf->m, 1);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Detach the mapped page and release resources back to the system.
 *
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero. A freed sf_buf does,
 * however, retain its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{
	if (hw_direct_map)
		return;

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;

		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_offset_t addr)
{

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */
	return (1);
}

/*
 * CPU threading functions related to the VM layer. These could be used
 * to map the SLB bits required for the kernel stack instead of forcing a
 * fixed-size KVA.
 */

void
cpu_thread_swapin(struct thread *td)
{

}

void
cpu_thread_swapout(struct thread *td)
{

}
