/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */
/*-
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct	trapframe *tf;
	struct	callframe *cf;
	struct	pcb *pcb;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: p1 not curproc and not proc0"));
	CTR3(KTR_PROC, "cpu_fork: called td1=%p p2=%p flags=%x",
	    td1, p2, flags);

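	/*
	 * With RFPROC clear no new process is being created, so there is
	 * no machine-dependent state to set up here.
	 */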
	if ((flags & RFPROC) == 0)
		return;

	/* Ensure td1 is up to date before copy. */
	if (td1 == curthread)
		cpu_save_thread_regs(td1);

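	/*
	 * Place the pcb at the top of the new thread's kernel stack; the
	 * mask rounds the address down so the pcb is at least 16-byte
	 * aligned.
	 */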
	pcb = (struct pcb *)((td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL);
	td2->td_pcb = pcb;

	/* Copy the pcb */
	bcopy(td1->td_pcb, pcb, sizeof(struct pcb));

	/*
	 * Create a fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 */
	tf = (struct trapframe *)pcb - 1;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	/* Set up trap frame. */
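	/* The child returns 0 from fork(); clearing CR0[SO] reports success. */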
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;

	td2->td_frame = tf;

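	/*
	 * Build a fake callframe below the trap frame so that the child
	 * first runs through fork_trampoline() and ends up in
	 * fork_return(td2, tf).
	 */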
	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
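	/*
	 * Under the 64-bit ELFv1 ABI a function pointer refers to a
	 * descriptor: word 0 is the entry address and word 1 the TOC
	 * pointer, so the TOC value is taken from the descriptor here.
	 */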
	#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
	cf->cf_toc = ((register_t *)fork_return)[1];
	#endif
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td2;
	cf->cf_arg1 = (register_t)tf;

	pcb->pcb_sp = (register_t)cf;
	KASSERT(pcb->pcb_sp % 16 == 0, ("stack misaligned"));
	#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
	pcb->pcb_lr = ((register_t *)fork_trampoline)[0];
	pcb->pcb_toc = ((register_t *)fork_trampoline)[1];
	#else
	pcb->pcb_lr = (register_t)fork_trampoline;
	pcb->pcb_context[0] = pcb->pcb_lr;
	#endif
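	/*
	 * On AIM, start the child with no cached user segment VSID; it is
	 * established again before user memory is accessed.
	 */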
	#ifdef AIM
	pcb->pcb_cpu.aim.usr_vsid = 0;
	#endif

	/* Set up to release the spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_msr = psl_kernset;

	/*
	 * Now cpu_switch() can schedule the new process.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct callframe *cf;

	CTR4(KTR_PROC, "%s called with td=%p func=%p arg=%p",
	    __func__, td, func, arg);

	cf = (struct callframe *)td->td_pcb->pcb_sp;

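	/*
	 * Overwrite the callframe that cpu_fork() prepared so the new
	 * thread calls func(arg) instead of fork_return().
	 */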
	#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
	cf->cf_toc = ((register_t *)func)[1];
	#endif
	cf->cf_func = (register_t)func;
	cf->cf_arg0 = (register_t)arg;
}

void
cpu_exit(struct thread *td)
{
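	/* Nothing machine-dependent needs to be released at exit on powerpc. */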

}

/*
 * CPU threading functions related to the VM layer. These could be used
 * to map the SLB bits required for the kernel stack instead of forcing a
 * fixed-size KVA.
 */

void
cpu_thread_swapin(struct thread *td)
{

}

void
cpu_thread_swapout(struct thread *td)
{

}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
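	/* There is no machine-dependent reason to force a new vmspace on exec. */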

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{
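	/* No machine-dependent procctl(2) commands are defined for powerpc. */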

	return (EINVAL);
}

void
cpu_sync_core(void)
{
	/*
	 * Linux performs "rfi" there.  Our rendezvous IPI handler on
	 * the target cpu does "rfi" before and lwsync/sync after the
	 * action, which is stronger than required.
	 */
}