Deleted Added
full compact
aim_machdep.c (200083) aim_machdep.c (204128)
1/*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31/*-
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 *
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55 */
56
57#include <sys/cdefs.h>
1/*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31/*-
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 *
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55 */
56
57#include <sys/cdefs.h>
58__FBSDID("$FreeBSD: head/sys/powerpc/aim/machdep.c 200083 2009-12-03 20:55:09Z nwhitehorn $");
58__FBSDID("$FreeBSD: head/sys/powerpc/aim/machdep.c 204128 2010-02-20 16:23:29Z nwhitehorn $");
59
60#include "opt_compat.h"
61#include "opt_ddb.h"
62#include "opt_kstack_pages.h"
63#include "opt_msgbuf.h"
64
65#include <sys/param.h>
66#include <sys/proc.h>
67#include <sys/systm.h>
68#include <sys/bio.h>
69#include <sys/buf.h>
70#include <sys/bus.h>
71#include <sys/cons.h>
72#include <sys/cpu.h>
73#include <sys/eventhandler.h>
74#include <sys/exec.h>
75#include <sys/imgact.h>
76#include <sys/kdb.h>
77#include <sys/kernel.h>
78#include <sys/ktr.h>
79#include <sys/linker.h>
80#include <sys/lock.h>
81#include <sys/malloc.h>
82#include <sys/mbuf.h>
83#include <sys/msgbuf.h>
84#include <sys/mutex.h>
85#include <sys/ptrace.h>
86#include <sys/reboot.h>
87#include <sys/signalvar.h>
88#include <sys/sysctl.h>
89#include <sys/sysent.h>
90#include <sys/sysproto.h>
91#include <sys/ucontext.h>
92#include <sys/uio.h>
93#include <sys/vmmeter.h>
94#include <sys/vnode.h>
95
96#include <net/netisr.h>
97
98#include <vm/vm.h>
99#include <vm/vm_extern.h>
100#include <vm/vm_kern.h>
101#include <vm/vm_page.h>
102#include <vm/vm_map.h>
103#include <vm/vm_object.h>
104#include <vm/vm_pager.h>
105
106#include <machine/altivec.h>
107#include <machine/bat.h>
108#include <machine/cpu.h>
109#include <machine/elf.h>
110#include <machine/fpu.h>
111#include <machine/hid.h>
112#include <machine/kdb.h>
113#include <machine/md_var.h>
114#include <machine/metadata.h>
115#include <machine/mmuvar.h>
116#include <machine/pcb.h>
117#include <machine/reg.h>
118#include <machine/sigframe.h>
119#include <machine/spr.h>
120#include <machine/trap.h>
121#include <machine/vmparam.h>
122
123#include <ddb/ddb.h>
124
125#include <dev/ofw/openfirm.h>
126
127#ifdef DDB
128extern vm_offset_t ksym_start, ksym_end;
129#endif
130
131int cold = 1;
132int cacheline_size = 32;
133int hw_direct_map = 1;
134
135struct pcpu __pcpu[MAXCPU];
136
137static struct trapframe frame0;
138
139char machine[] = "powerpc";
140SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
141
142static void cpu_startup(void *);
143SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
144
145SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
146 CTLFLAG_RD, &cacheline_size, 0, "");
147
148u_int powerpc_init(u_int, u_int, u_int, void *);
149
150int save_ofw_mapping(void);
151int restore_ofw_mapping(void);
152
153void install_extint(void (*)(void));
154
155int setfault(faultbuf); /* defined in locore.S */
156
157static int grab_mcontext(struct thread *, mcontext_t *, int);
158
159void asm_panic(char *);
160
161long Maxmem = 0;
162long realmem = 0;
163
164struct pmap ofw_pmap;
165extern int ofmsr;
166
167struct bat battable[16];
168
169struct kva_md_info kmi;
170
/*
 * shutdown_final event handler: hand control back to Open Firmware.
 * Halts via OF_halt() when RB_HALT is set in howto; otherwise falls
 * through to OF_reboot().
 */
static void
powerpc_ofw_shutdown(void *junk, int howto)
{
	if (howto & RB_HALT) {
		OF_halt();
	}
	OF_reboot();
}
179
180static void
181cpu_startup(void *dummy)
182{
183
184 /*
185 * Initialise the decrementer-based clock.
186 */
187 decr_init();
188
189 /*
190 * Good {morning,afternoon,evening,night}.
191 */
192 cpu_setup(PCPU_GET(cpuid));
193
194#ifdef PERFMON
195 perfmon_init();
196#endif
197 printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
198 ptoa(physmem) / 1048576);
199 realmem = physmem;
200
59
60#include "opt_compat.h"
61#include "opt_ddb.h"
62#include "opt_kstack_pages.h"
63#include "opt_msgbuf.h"
64
65#include <sys/param.h>
66#include <sys/proc.h>
67#include <sys/systm.h>
68#include <sys/bio.h>
69#include <sys/buf.h>
70#include <sys/bus.h>
71#include <sys/cons.h>
72#include <sys/cpu.h>
73#include <sys/eventhandler.h>
74#include <sys/exec.h>
75#include <sys/imgact.h>
76#include <sys/kdb.h>
77#include <sys/kernel.h>
78#include <sys/ktr.h>
79#include <sys/linker.h>
80#include <sys/lock.h>
81#include <sys/malloc.h>
82#include <sys/mbuf.h>
83#include <sys/msgbuf.h>
84#include <sys/mutex.h>
85#include <sys/ptrace.h>
86#include <sys/reboot.h>
87#include <sys/signalvar.h>
88#include <sys/sysctl.h>
89#include <sys/sysent.h>
90#include <sys/sysproto.h>
91#include <sys/ucontext.h>
92#include <sys/uio.h>
93#include <sys/vmmeter.h>
94#include <sys/vnode.h>
95
96#include <net/netisr.h>
97
98#include <vm/vm.h>
99#include <vm/vm_extern.h>
100#include <vm/vm_kern.h>
101#include <vm/vm_page.h>
102#include <vm/vm_map.h>
103#include <vm/vm_object.h>
104#include <vm/vm_pager.h>
105
106#include <machine/altivec.h>
107#include <machine/bat.h>
108#include <machine/cpu.h>
109#include <machine/elf.h>
110#include <machine/fpu.h>
111#include <machine/hid.h>
112#include <machine/kdb.h>
113#include <machine/md_var.h>
114#include <machine/metadata.h>
115#include <machine/mmuvar.h>
116#include <machine/pcb.h>
117#include <machine/reg.h>
118#include <machine/sigframe.h>
119#include <machine/spr.h>
120#include <machine/trap.h>
121#include <machine/vmparam.h>
122
123#include <ddb/ddb.h>
124
125#include <dev/ofw/openfirm.h>
126
127#ifdef DDB
128extern vm_offset_t ksym_start, ksym_end;
129#endif
130
131int cold = 1;
132int cacheline_size = 32;
133int hw_direct_map = 1;
134
135struct pcpu __pcpu[MAXCPU];
136
137static struct trapframe frame0;
138
139char machine[] = "powerpc";
140SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
141
142static void cpu_startup(void *);
143SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
144
145SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
146 CTLFLAG_RD, &cacheline_size, 0, "");
147
148u_int powerpc_init(u_int, u_int, u_int, void *);
149
150int save_ofw_mapping(void);
151int restore_ofw_mapping(void);
152
153void install_extint(void (*)(void));
154
155int setfault(faultbuf); /* defined in locore.S */
156
157static int grab_mcontext(struct thread *, mcontext_t *, int);
158
159void asm_panic(char *);
160
161long Maxmem = 0;
162long realmem = 0;
163
164struct pmap ofw_pmap;
165extern int ofmsr;
166
167struct bat battable[16];
168
169struct kva_md_info kmi;
170
171static void
172powerpc_ofw_shutdown(void *junk, int howto)
173{
174 if (howto & RB_HALT) {
175 OF_halt();
176 }
177 OF_reboot();
178}
179
180static void
181cpu_startup(void *dummy)
182{
183
184 /*
185 * Initialise the decrementer-based clock.
186 */
187 decr_init();
188
189 /*
190 * Good {morning,afternoon,evening,night}.
191 */
192 cpu_setup(PCPU_GET(cpuid));
193
194#ifdef PERFMON
195 perfmon_init();
196#endif
197 printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
198 ptoa(physmem) / 1048576);
199 realmem = physmem;
200
201 if (bootverbose)
202 printf("available KVA = %zd (%zd MB)\n",
203 virtual_end - virtual_avail,
204 (virtual_end - virtual_avail) / 1048576);
205
201 /*
202 * Display any holes after the first chunk of extended memory.
203 */
204 if (bootverbose) {
205 int indx;
206
207 printf("Physical memory chunk(s):\n");
208 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
209 int size1 = phys_avail[indx + 1] - phys_avail[indx];
210
211 printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
212 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
213 size1 / PAGE_SIZE);
214 }
215 }
216
217 vm_ksubmap_init(&kmi);
218
219 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
220 ptoa(cnt.v_free_count) / 1048576);
221
222 /*
223 * Set up buffers, so they can be used to read disk labels.
224 */
225 bufinit();
226 vm_pager_bufferinit();
227
228 EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
229 SHUTDOWN_PRI_LAST);
230}
231
232extern char kernel_text[], _end[];
233
234extern void *testppc64, *testppc64size;
235extern void *restorebridge, *restorebridgesize;
236extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
237#ifdef SMP
238extern void *rstcode, *rstsize;
239#endif
240extern void *trapcode, *trapcode64, *trapsize;
241extern void *alitrap, *alisize;
242extern void *dsitrap, *dsisize;
243extern void *decrint, *decrsize;
244extern void *extint, *extsize;
245extern void *dblow, *dbsize;
246
/*
 * Early machine-dependent kernel initialization, called from locore
 * before main().  Parses loader metadata, sets up proc0/thread0 and
 * per-CPU data, probes the cache line size and 64-bit capability,
 * installs the trap vectors, bootstraps the pmap, and initializes the
 * console, KDB and the message buffer.
 *
 * Returns an aligned address just below thread0's PCB — presumably used
 * by the caller as the initial kernel stack pointer (NOTE(review):
 * confirm against the locore.S caller, which is not visible here).
 */
u_int
powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
{
	struct pcpu *pc;
	vm_offset_t end;
	void *generictrap;
	size_t trap_offset;
	void *kmdp;
	char *env;
	uint32_t msr, scratch;
	uint8_t *cache_check;
	int ppc64;

	end = 0;
	kmdp = NULL;
	trap_offset = 0;

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = __pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	pc->pc_cpuid = 0;

	/* Stash the pcpu pointer in SPRG0 for fast per-CPU access. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */
	mutex_init();

	/*
	 * Install the OF client interface
	 */
	OF_bootstrap();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	/*
	 * Init KDB
	 */
	kdb_init();

	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
	 * them pretend they have a 32-byte cacheline. Turn this off
	 * before we measure the cacheline size.
	 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr64upper(SPR_HID5,msr);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr64(SPR_HID5, scratch, mfspr(SPR_HID5), msr);
		break;
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5).
	 */
	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
	isync();

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */
	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
		cacheline_size = 32;
	}

	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */
	ppc64 = 1;

	/* Install the probe snippet at EXC_PGM so the trap lands there. */
	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */
	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
	#endif

		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */
		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);

		/*
		 * Set the common trap entry point to the one that
		 * knows to restore 32-bit operation on execution.
		 */
		generictrap = &trapcode64;
	} else {
		generictrap = &trapcode;
	}

#ifdef SMP
	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);
#else
	bcopy(generictrap, (void *)EXC_RST, (size_t)&trapsize);
#endif

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
#else
	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
#endif
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
	bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_THRM, (size_t)&trapsize);
	/* Make the freshly copied vectors visible to instruction fetch. */
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);
	isync();

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */
	platform_probe_and_attach();

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

	pmap_bootstrap(startkernel, endkernel);
	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
	isync();

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.  The PCB lives at the 16-byte-aligned
	 * top of thread0's kernel stack.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, MSGBUF_SIZE);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	return (((uintptr_t)thread0.td_pcb - 16) & ~15);
}
538
/*
 * Zero len bytes starting at buf.  Byte-fills up to the first word
 * (unsigned long) boundary, then clears eight words per pass, then one
 * word at a time, and finally any trailing bytes individually.
 */
void
bzero(void *buf, size_t len)
{
	unsigned char *dst;

	dst = buf;

	/* Byte-at-a-time until dst is word-aligned (or len runs out). */
	while (len != 0 &&
	    ((uintptr_t)dst & (sizeof(unsigned long) - 1)) != 0) {
		*dst++ = 0;
		len--;
	}

	/* Bulk phase: eight words per iteration. */
	while (len >= sizeof(unsigned long) * 8) {
		unsigned long *w = (unsigned long *)dst;

		w[0] = 0;
		w[1] = 0;
		w[2] = 0;
		w[3] = 0;
		w[4] = 0;
		w[5] = 0;
		w[6] = 0;
		w[7] = 0;
		dst += sizeof(unsigned long) * 8;
		len -= sizeof(unsigned long) * 8;
	}

	/* Remaining whole words. */
	while (len >= sizeof(unsigned long)) {
		*(unsigned long *)dst = 0;
		dst += sizeof(unsigned long);
		len -= sizeof(unsigned long);
	}

	/* Trailing bytes. */
	while (len != 0) {
		*dst++ = 0;
		len--;
	}
}
575
/*
 * Deliver a signal to the current thread: build a signal frame (saved
 * machine context plus siginfo) on the user stack — or the alternate
 * signal stack when configured — copy it out, and rewrite the trapframe
 * so the thread resumes in the signal trampoline, which dispatches to
 * catcher.  Called with the proc lock and psp->ps_mtx held; both are
 * dropped around the copyout and reacquired before return.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct proc *p;
	int oonstack, rndfsize;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	oonstack = sigonstack(tf->fixreg[1]);

	/* Frame size rounded up to the 16-byte stack alignment. */
	rndfsize = ((sizeof(sf) + 15) / 16) * 16;

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	     catcher, sig);

	/*
	 * Save user context
	 */
	memset(&sf, 0, sizeof(sf));
	grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;

	/*
	 * Allocate and validate space for the signal handler context.
	 * Use the alternate stack only if it is enabled, we are not
	 * already on it, and this signal was configured to use it.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
		   td->td_sigstk.ss_size - rndfsize);
	} else {
		sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
	}

	/*
	 * Translate the signal if appropriate (Linux emu ?)
	 */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Save the floating-point state, if necessary, then copy it.
	 */
	/* XXX */

	/*
	 * Set up the registers to return to sigcode.
	 *
	 * r1/sp - sigframe ptr
	 * lr    - sig function, dispatched to by blrl in trampoline
	 * r3    - sig number
	 * r4    - SIGINFO ? &siginfo : exception code
	 * r5    - user context
	 * srr0  - trampoline function addr
	 */
	tf->lr = (register_t)catcher;
	tf->fixreg[1] = (register_t)sfp;
	tf->fixreg[FIRSTARG] = sig;
	tf->fixreg[FIRSTARG+2] = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 */
		tf->fixreg[FIRSTARG+1] = (register_t)&sfp->sf_si;

		/*
		 * Fill siginfo structure.  For a DSI (data storage)
		 * exception report the faulting data address, else the PC.
		 */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_addr = (void *)((tf->exc == EXC_DSI) ?
		    tf->cpu.aim.dar : tf->srr0);
	} else {
		/* Old FreeBSD-style arguments. */
		tf->fixreg[FIRSTARG+1] = code;
		tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
		    tf->cpu.aim.dar : tf->srr0;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Resume at the signal trampoline, located just below PS_STRINGS. */
	tf->srr0 = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	/*
	 * copy the frame out to userland.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/*
		 * Process has trashed its stack. Kill it.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
	    tf->srr0, tf->fixreg[1]);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
694
/*
 * sigreturn(2): restore the machine context and signal mask that were
 * saved by sendsig() when the handler was invoked.  Returns EFAULT if
 * the user context cannot be copied in, an error from set_mcontext()
 * if the saved context is invalid, or EJUSTRETURN on success (the
 * restored trapframe already holds the return state).
 */
int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Reinstate the signal mask that was in effect before delivery. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	     td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	return (EJUSTRETURN);
}
719
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x sigreturn(2) compatibility entry point.  The argument
 * layout matches the current syscall, so just forward to sigreturn().
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return (sigreturn(td, (struct sigreturn_args *)uap));
}
#endif
728
729/*
730 * Construct a PCB from a trapframe. This is called from kdb_trap() where
731 * we want to start a backtrace from the function that caused us to enter
732 * the debugger. We have the context in the trapframe, but base the trace
733 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
734 * enough for a backtrace.
735 */
736void
737makectx(struct trapframe *tf, struct pcb *pcb)
738{
739
740 pcb->pcb_lr = tf->srr0;
741 pcb->pcb_sp = tf->fixreg[1];
742}
743
744/*
745 * get_mcontext/sendsig helper routine that doesn't touch the
746 * proc lock
747 */
/*
 * Fill *mcp with td's machine context: the trapframe, plus FPU and
 * Altivec state when the thread has used them (their unit state is
 * flushed to the PCB under a critical section first).  GET_MC_CLEAR_RET
 * in flags zeroes the syscall return registers (r3/r4).  Does not take
 * the proc lock; mc_onstack is left for the caller.  Always returns 0.
 */
static int
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;
	mcp->mc_flags = 0;
	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_gpr[3] = 0;
		mcp->mc_gpr[4] = 0;
	}

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPU) {
		KASSERT(td == curthread,
			("get_mcontext: fp save not curthread"));
		critical_enter();
		save_fpu(td);
		critical_exit();
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		memcpy(mcp->mc_fpreg, pcb->pcb_fpu.fpr, 32*sizeof(double));
	}

	/*
	 * Repeat for Altivec context
	 */
	if (pcb->pcb_flags & PCB_VEC) {
		KASSERT(td == curthread,
			("get_mcontext: fp save not curthread"));
		critical_enter();
		save_vec(td);
		critical_exit();
		mcp->mc_flags |= _MC_AV_VALID;
		mcp->mc_vscr  = pcb->pcb_vec.vscr;
		mcp->mc_vrsave =  pcb->pcb_vec.vrsave;
		memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
	}

	mcp->mc_len = sizeof(*mcp);

	return (0);
}
802
803int
804get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
805{
806 int error;
807
808 error = grab_mcontext(td, mcp, flags);
809 if (error == 0) {
810 PROC_LOCK(curthread->td_proc);
811 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
812 PROC_UNLOCK(curthread->td_proc);
813 }
814
815 return (error);
816}
817
/*
 * Install the machine context *mcp into thread td: restore the
 * trapframe and, when flagged valid, the FPU and Altivec state.
 * Returns EINVAL if the context version/length is wrong or if it
 * attempts to change privileged (non-user-settable) MSR bits.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;

	pcb = td->td_pcb;
	tf = td->td_frame;

	/* Reject contexts from a different ABI revision or size. */
	if (mcp->mc_vers != _MC_VERSION ||
	    mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user set privileged MSR bits
	 */
	if ((mcp->mc_srr1 & PSL_USERSTATIC) != (tf->srr1 & PSL_USERSTATIC)) {
		return (EINVAL);
	}

	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* Enable the FPU for this thread if it wasn't already. */
		if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
			critical_enter();
			enable_fpu(td);
			critical_exit();
		}
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		memcpy(pcb->pcb_fpu.fpr, mcp->mc_fpreg, 32*sizeof(double));
	}

	if (mcp->mc_flags & _MC_AV_VALID) {
		/* Likewise for the vector unit. */
		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
			critical_enter();
			enable_vec(td);
			critical_exit();
		}
		pcb->pcb_vec.vscr = mcp->mc_vscr;
		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
	}

	return (0);
}
864
/* MD boot hook; nothing to do on this platform. */
void
cpu_boot(int howto)
{
}
869
870/*
871 * Flush the D-cache for non-DMA I/O so that the I-cache can
872 * be made coherent later.
873 */
/* D-cache flush for non-DMA I/O; not yet implemented on this platform. */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* TBD */
}
879
880void
881cpu_initclocks(void)
882{
883
884 decr_tc_init();
885 stathz = hz;
886 profhz = hz;
887}
888
889/*
890 * Shutdown the CPU as much as possible.
891 */
/*
 * Shutdown the CPU as much as possible: exit back to Open Firmware.
 */
void
cpu_halt(void)
{

	OF_exit();
}
898
/*
 * Idle-loop hook: when power saving is enabled, put the CPU into the
 * POW (power-saving) state via the MSR.  970- and 74xx-family parts
 * additionally require draining outstanding data streams (dssall)
 * before entering POW, per their errata, so they take the asm path.
 */
void
cpu_idle(int busy)
{
	uint32_t msr;
	uint16_t vers;

	msr = mfmsr();
	vers = mfpvr() >> 16;

#ifdef INVARIANTS
	/* Sleeping with interrupts disabled would never wake up. */
	if ((msr & PSL_EE) != PSL_EE) {
		struct thread *td = curthread;
		printf("td msr %x\n", td->td_md.md_saved_msr);
		panic("ints disabled in idleproc!");
	}
#endif
	if (powerpc_pow_enabled) {
		switch (vers) {
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case MPC7447A:
		case MPC7448:
		case MPC7450:
		case MPC7455:
		case MPC7457:
			__asm __volatile("\
			    dssall; sync; mtmsr %0; isync"
			    :: "r"(msr | PSL_POW));
			break;
		default:
			powerpc_sync();
			mtmsr(msr | PSL_POW);
			isync();
			break;
		}
	}
}
937
/*
 * MD idle-wakeup hook.  No platform-specific action is taken here, so
 * report 0 (nothing done) to the caller.
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
944
945/*
946 * Set set up registers on exec.
947 */
/*
 * Initialize td's trapframe for a freshly exec'd image: align the user
 * stack, fetch the ps_strings block back from userland, and load the
 * argument registers expected by the ELF entry point before jumping to
 * entry in user mode.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	struct ps_strings arginfo;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	/* Reserve 8 bytes above the SP and round down to 16-byte alignment. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/*
	 * XXX Machine-independent code has already copied arguments and
	 * XXX environment to userland. Get them back here.
	 */
	(void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));

	/*
	 * Set up arguments for _start():
	 * _start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 * - obj and cleanup are the auxilliary and termination
	 *  vectors. They are fixed up by ld.elf_so.
	 * - ps_strings is a NetBSD extention, and will be
	 *  ignored by executables which are strictly
	 *  compliant with the SVR4 ABI.
	 *
	 * XXX We have to set both regs and retval here due to different
	 * XXX calling convention in trap.c and init_main.c.
	 */
	/*
	 * XXX PG: these get overwritten in the syscall return code.
	 * execve() should return EJUSTRETURN, like it does on NetBSD.
	 * Emulate by setting the syscall return value cells. The
	 * registers still have to be set for init's fork trampoline.
	 */
	td->td_retval[0] = arginfo.ps_nargvstr;
	td->td_retval[1] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[3] = arginfo.ps_nargvstr;
	tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[5] = (register_t)arginfo.ps_envstr;
	tf->fixreg[6] = 0;			/* auxillary vector */
	tf->fixreg[7] = 0;			/* termination vector */
	tf->fixreg[8] = (register_t)PS_STRINGS;	/* NetBSD extension */

	tf->srr0 = entry;
	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
	td->td_pcb->pcb_flags = 0;
}
997
998int
999fill_regs(struct thread *td, struct reg *regs)
1000{
1001 struct trapframe *tf;
1002
1003 tf = td->td_frame;
1004 memcpy(regs, tf, sizeof(struct reg));
1005
1006 return (0);
1007}
1008
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC; report as unimplemented. */
	return (ENOSYS);
}
1015
1016int
1017fill_fpregs(struct thread *td, struct fpreg *fpregs)
1018{
1019 struct pcb *pcb;
1020
1021 pcb = td->td_pcb;
1022
1023 if ((pcb->pcb_flags & PCB_FPU) == 0)
1024 memset(fpregs, 0, sizeof(struct fpreg));
1025 else
1026 memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg));
1027
1028 return (0);
1029}
1030
1031int
1032set_regs(struct thread *td, struct reg *regs)
1033{
1034 struct trapframe *tf;
1035
1036 tf = td->td_frame;
1037 memcpy(tf, regs, sizeof(struct reg));
1038
1039 return (0);
1040}
1041
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC; report as unimplemented. */
	return (ENOSYS);
}
1048
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	/*
	 * Mark the thread as an FPU user first (enable_fpu() -- TODO
	 * confirm it sets PCB_FPU) so the state loaded below sticks.
	 */
	if ((pcb->pcb_flags & PCB_FPU) == 0)
		enable_fpu(td);
	memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg));

	return (0);
}
1061
1062int
1063ptrace_set_pc(struct thread *td, unsigned long addr)
1064{
1065 struct trapframe *tf;
1066
1067 tf = td->td_frame;
1068 tf->srr0 = (register_t)addr;
1069
1070 return (0);
1071}
1072
1073int
1074ptrace_single_step(struct thread *td)
1075{
1076 struct trapframe *tf;
1077
1078 tf = td->td_frame;
1079 tf->srr1 |= PSL_SE;
1080
1081 return (0);
1082}
1083
1084int
1085ptrace_clear_single_step(struct thread *td)
1086{
1087 struct trapframe *tf;
1088
1089 tf = td->td_frame;
1090 tf->srr1 &= ~PSL_SE;
1091
1092 return (0);
1093}
1094
void
kdb_cpu_clear_singlestep(void)
{

	/* Clear PSL_SE (single-step trace) in the debugger's saved frame. */
	kdb_frame->srr1 &= ~PSL_SE;
}
1101
void
kdb_cpu_set_singlestep(void)
{

	/* Set PSL_SE (single-step trace) in the debugger's saved frame. */
	kdb_frame->srr1 |= PSL_SE;
}
1108
1109/*
1110 * Initialise a struct pcpu.
1111 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{

	/* No machine-dependent pcpu fields to initialize on AIM. */
}
1117
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	/*
	 * On the outermost acquisition, disable interrupts and stash
	 * the prior MSR so spinlock_exit() can restore it when the
	 * nesting count drops back to zero.
	 */
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_msr = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}
1129
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	/* Mirror of spinlock_enter(): pop one nesting level. */
	critical_exit();
	td->td_md.md_spinlock_count--;
	/* Restore the saved MSR only when the last level is released. */
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_msr);
}
1141
1142/*
1143 * kcopy(const void *src, void *dst, size_t len);
1144 *
1145 * Copy len bytes from src to dst, aborting if we encounter a fatal
1146 * page fault.
1147 *
1148 * kcopy() _must_ save and restore the old fault handler since it is
1149 * called by uiomove(), which may be in the path of servicing a non-fatal
1150 * page fault.
1151 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct thread *td;
	faultbuf env, *oldfault;
	int rv;

	td = PCPU_GET(curthread);
	/* Save any existing onfault handler (e.g. set by uiomove()). */
	oldfault = td->td_pcb->pcb_onfault;
	/* setfault() arms pcb_onfault; returns non-zero when a fault hit. */
	if ((rv = setfault(env)) != 0) {
		td->td_pcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	/* Restore the previous handler on the success path as well. */
	td->td_pcb->pcb_onfault = oldfault;
	return (0);
}
1171
void
asm_panic(char *pstr)
{
	/* C trampoline so assembly code can call panic(9). */
	panic(pstr);
}
1177
1178int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
1179
int
db_trap_glue(struct trapframe *frame)
{
	/*
	 * Hand the trap to the debugger only for kernel-mode (PSL_PR
	 * clear) traps of interest: trace, run-mode trace, program
	 * exceptions raised by a trap instruction (SRR1 bit 0x20000),
	 * explicit breakpoints, and DSIs.
	 */
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;
		/* Program exception from a trap instruction = breakpoint. */
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}
206 /*
207 * Display any holes after the first chunk of extended memory.
208 */
209 if (bootverbose) {
210 int indx;
211
212 printf("Physical memory chunk(s):\n");
213 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
214 int size1 = phys_avail[indx + 1] - phys_avail[indx];
215
216 printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
217 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
218 size1 / PAGE_SIZE);
219 }
220 }
221
222 vm_ksubmap_init(&kmi);
223
224 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
225 ptoa(cnt.v_free_count) / 1048576);
226
227 /*
228 * Set up buffers, so they can be used to read disk labels.
229 */
230 bufinit();
231 vm_pager_bufferinit();
232
233 EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
234 SHUTDOWN_PRI_LAST);
235}
236
237extern char kernel_text[], _end[];
238
239extern void *testppc64, *testppc64size;
240extern void *restorebridge, *restorebridgesize;
241extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
242#ifdef SMP
243extern void *rstcode, *rstsize;
244#endif
245extern void *trapcode, *trapcode64, *trapsize;
246extern void *alitrap, *alisize;
247extern void *dsitrap, *dsisize;
248extern void *decrint, *decrsize;
249extern void *extint, *extsize;
250extern void *dblow, *dbsize;
251
/*
 * Early machine-dependent bootstrap.  Sets up pcpu/thread0, the OF
 * client interface and console, measures the cache line size, detects
 * 64-bit (bridge-mode) CPUs, installs the trap vectors, and bootstraps
 * the pmap.  Returns the initial kernel stack pointer for thread0.
 */
u_int
powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
{
	struct pcpu *pc;
	vm_offset_t end;
	void *generictrap;
	size_t trap_offset;
	void *kmdp;
	char *env;
	uint32_t msr, scratch;
	uint8_t *cache_check;
	int ppc64;

	end = 0;
	kmdp = NULL;
	trap_offset = 0;

	/*
	 * Parse metadata if present and fetch parameters. Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = __pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	pc->pc_cpuid = 0;

	/* SPRG0 holds the pcpu pointer by convention on this port. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */

	mutex_init();

	/*
	 * Install the OF client interface
	 */

	OF_bootstrap();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	/*
	 * Init KDB
	 */

	kdb_init();

	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
	 * them pretend they have a 32-byte cacheline. Turn this off
	 * before we measure the cacheline size.
	 */

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr64upper(SPR_HID5,msr);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr64(SPR_HID5, scratch, mfspr(SPR_HID5), msr);
		break;
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5).
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
	isync();

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
		cacheline_size = 32;
	}

	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	/*
	 * The mtmsrd below traps to the handler just installed on
	 * 32-bit CPUs, which clears the sprg2-round-tripped flag.
	 */
	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
	#endif

		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset); 
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); 
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); 
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); 
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); 
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); 
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); 

		/*
		 * Set the common trap entry point to the one that
		 * knows to restore 32-bit operation on execution.
		 */

		generictrap = &trapcode64;
	} else {
		generictrap = &trapcode;
	}

#ifdef SMP
	bcopy(&rstcode, (void *)(EXC_RST + trap_offset),  (size_t)&rstsize);
#else
	bcopy(generictrap, (void *)EXC_RST,  (size_t)&trapsize);
#endif

#ifdef KDB
	bcopy(&dblow,	(void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
	bcopy(&dblow,   (void *)(EXC_PGM + trap_offset),  (size_t)&dbsize);
	bcopy(&dblow,   (void *)(EXC_TRC + trap_offset),  (size_t)&dbsize);
	bcopy(&dblow,   (void *)(EXC_BPT + trap_offset),  (size_t)&dbsize);
#else
	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PGM,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_TRC,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_BPT,  (size_t)&trapsize);
#endif
	bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsisize);
	bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&alisize);
	bcopy(generictrap, (void *)EXC_ISI,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_EXI,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPU,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_SC,   (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPA,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VEC,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_THRM, (size_t)&trapsize);
	/* Make the freshly-written vectors visible to instruction fetch. */
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);
	isync();

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

	pmap_bootstrap(startkernel, endkernel);
	/* Re-enable translation and machine-check/recoverable bits. */
	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
	isync();

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, MSGBUF_SIZE);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	/* Initial stack pointer: just below the PCB, 16-byte aligned. */
	return (((uintptr_t)thread0.td_pcb - 16) & ~15);
}
543
544void
545bzero(void *buf, size_t len)
546{
547 caddr_t p;
548
549 p = buf;
550
551 while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
552 *p++ = 0;
553 len--;
554 }
555
556 while (len >= sizeof(u_long) * 8) {
557 *(u_long*) p = 0;
558 *((u_long*) p + 1) = 0;
559 *((u_long*) p + 2) = 0;
560 *((u_long*) p + 3) = 0;
561 len -= sizeof(u_long) * 8;
562 *((u_long*) p + 4) = 0;
563 *((u_long*) p + 5) = 0;
564 *((u_long*) p + 6) = 0;
565 *((u_long*) p + 7) = 0;
566 p += sizeof(u_long) * 8;
567 }
568
569 while (len >= sizeof(u_long)) {
570 *(u_long*) p = 0;
571 len -= sizeof(u_long);
572 p += sizeof(u_long);
573 }
574
575 while (len) {
576 *p++ = 0;
577 len--;
578 }
579}
580
/*
 * Deliver signal 'ksi' to the current thread by building a sigframe
 * on the user stack (or alternate signal stack) and redirecting the
 * trapframe to the signal trampoline.  Called with the proc lock and
 * ps_mtx held; both are dropped around copyout() and reacquired.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct proc *p;
	int oonstack, rndfsize;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	oonstack = sigonstack(tf->fixreg[1]);

	/* Frame size rounded up to keep the stack 16-byte aligned. */
	rndfsize = ((sizeof(sf) + 15) / 16) * 16;

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	     catcher, sig);

	/*
	 * Save user context
	 */
	memset(&sf, 0, sizeof(sf));
	grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;

	/*
	 * Allocate and validate space for the signal handler context.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
		   td->td_sigstk.ss_size - rndfsize);
	} else {
		sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
	}

	/*
	 * Translate the signal if appropriate (Linux emu ?)
	 */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Save the floating-point state, if necessary, then copy it.
	 */
	/* XXX */

	/*
	 * Set up the registers to return to sigcode.
	 *
	 *   r1/sp - sigframe ptr
	 *   lr    - sig function, dispatched to by blrl in trampoline
	 *   r3    - sig number
	 *   r4    - SIGINFO ? &siginfo : exception code
	 *   r5    - user context
	 *   srr0  - trampoline function addr
	 */
	tf->lr = (register_t)catcher;
	tf->fixreg[1] = (register_t)sfp;
	tf->fixreg[FIRSTARG] = sig;
	tf->fixreg[FIRSTARG+2] = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 */
		tf->fixreg[FIRSTARG+1] = (register_t)&sfp->sf_si;

		/*
		 * Fill siginfo structure.
		 */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_addr = (void *)((tf->exc == EXC_DSI) ? 
		    tf->cpu.aim.dar : tf->srr0);
	} else {
		/* Old FreeBSD-style arguments. */
		tf->fixreg[FIRSTARG+1] = code;
		tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ? 
		    tf->cpu.aim.dar : tf->srr0;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* The trampoline sits at the top of the ps_strings area. */
	tf->srr0 = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	/*
	 * copy the frame out to userland.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/*
		 * Process has trashed its stack. Kill it.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
	     tf->srr0, tf->fixreg[1]);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
699
/*
 * sigreturn(2): restore the machine context saved by sendsig() and
 * reinstall the saved signal mask.  Returns EJUSTRETURN so the
 * syscall path leaves the restored registers untouched.
 */
int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	     td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	return (EJUSTRETURN);
}
724
#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	/*
	 * The 4.x argument layout is treated as compatible here and
	 * forwarded unchanged to sigreturn() -- TODO confirm against
	 * the freebsd4 sigframe definition.
	 */
	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif
733
734/*
735 * Construct a PCB from a trapframe. This is called from kdb_trap() where
736 * we want to start a backtrace from the function that caused us to enter
737 * the debugger. We have the context in the trapframe, but base the trace
738 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
739 * enough for a backtrace.
740 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	/* LR and SP are all the stack tracer needs from the frame. */
	pcb->pcb_lr = tf->srr0;
	pcb->pcb_sp = tf->fixreg[1];
}
748
749/*
750 * get_mcontext/sendsig helper routine that doesn't touch the
751 * proc lock
752 */
753static int
754grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
755{
756 struct pcb *pcb;
757
758 pcb = td->td_pcb;
759
760 memset(mcp, 0, sizeof(mcontext_t));
761
762 mcp->mc_vers = _MC_VERSION;
763 mcp->mc_flags = 0;
764 memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
765 if (flags & GET_MC_CLEAR_RET) {
766 mcp->mc_gpr[3] = 0;
767 mcp->mc_gpr[4] = 0;
768 }
769
770 /*
771 * This assumes that floating-point context is *not* lazy,
772 * so if the thread has used FP there would have been a
773 * FP-unavailable exception that would have set things up
774 * correctly.
775 */
776 if (pcb->pcb_flags & PCB_FPU) {
777 KASSERT(td == curthread,
778 ("get_mcontext: fp save not curthread"));
779 critical_enter();
780 save_fpu(td);
781 critical_exit();
782 mcp->mc_flags |= _MC_FP_VALID;
783 memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
784 memcpy(mcp->mc_fpreg, pcb->pcb_fpu.fpr, 32*sizeof(double));
785 }
786
787 /*
788 * Repeat for Altivec context
789 */
790
791 if (pcb->pcb_flags & PCB_VEC) {
792 KASSERT(td == curthread,
793 ("get_mcontext: fp save not curthread"));
794 critical_enter();
795 save_vec(td);
796 critical_exit();
797 mcp->mc_flags |= _MC_AV_VALID;
798 mcp->mc_vscr = pcb->pcb_vec.vscr;
799 mcp->mc_vrsave = pcb->pcb_vec.vrsave;
800 memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
801 }
802
803 mcp->mc_len = sizeof(*mcp);
804
805 return (0);
806}
807
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	int error;

	error = grab_mcontext(td, mcp, flags);
	if (error == 0) {
		/*
		 * NOTE(review): this locks curthread's proc while
		 * reading td's frame -- equivalent only when
		 * td == curthread; confirm callers never pass another
		 * thread here.
		 */
		PROC_LOCK(curthread->td_proc);
		mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
		PROC_UNLOCK(curthread->td_proc);
	}

	return (error);
}
822
/*
 * Install a user-supplied machine context on 'td': validate version,
 * size and privileged MSR bits, then load GPRs and any FP/Altivec
 * state flagged as valid.  Returns 0 or EINVAL.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;

	pcb = td->td_pcb;
	tf = td->td_frame;

	if (mcp->mc_vers != _MC_VERSION ||
	    mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user set privileged MSR bits
	 */
	if ((mcp->mc_srr1 & PSL_USERSTATIC) != (tf->srr1 & PSL_USERSTATIC)) {
		return (EINVAL);
	}

	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* Mark the thread as an FPU user before loading state. */
		if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
			critical_enter();
			enable_fpu(td);
			critical_exit();
		}
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		memcpy(pcb->pcb_fpu.fpr, mcp->mc_fpreg, 32*sizeof(double));
	}

	if (mcp->mc_flags & _MC_AV_VALID) {
		/* Likewise for Altivec state. */
		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
			critical_enter();
			enable_vec(td);
			critical_exit();
		}
		pcb->pcb_vec.vscr = mcp->mc_vscr;
		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
	}


	return (0);
}
869
void
cpu_boot(int howto)
{
	/* Nothing machine-dependent to do at reboot time on AIM. */
}
874
875/*
876 * Flush the D-cache for non-DMA I/O so that the I-cache can
877 * be made coherent later.
878 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* TBD - not implemented yet; currently a no-op. */
}
884
void
cpu_initclocks(void)
{

	/*
	 * Hook up the decrementer as a timecounter and run the
	 * statistics and profiling clocks at the hardclock rate.
	 */
	decr_tc_init();
	stathz = hz;
	profhz = hz;
}
893
894/*
895 * Shutdown the CPU as much as possible.
896 */
void
cpu_halt(void)
{

	/* Return control to Open Firmware; this does not return. */
	OF_exit();
}
903
void
cpu_idle(int busy)
{
	uint32_t msr;
	uint16_t vers;

	msr = mfmsr();
	vers = mfpvr() >> 16;	/* CPU model from the upper PVR half */

#ifdef INVARIANTS
	/* Idling with interrupts masked would never wake up. */
	if ((msr & PSL_EE) != PSL_EE) {
		struct thread *td = curthread;
		printf("td msr %x\n", td->td_md.md_saved_msr);
		panic("ints disabled in idleproc!");
	}
#endif
	if (powerpc_pow_enabled) {
		switch (vers) {
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case MPC7447A:
		case MPC7448:
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/*
			 * Altivec-capable parts: stop all data streams
			 * (dssall) before napping, then enter power-save
			 * with the sync/mtmsr/isync sequence.
			 */
			__asm __volatile("\
			    dssall; sync; mtmsr %0; isync"
			    :: "r"(msr | PSL_POW));
			break;
		default:
			/* Generic nap: set PSL_POW with full sync. */
			powerpc_sync();
			mtmsr(msr | PSL_POW);
			isync();
			break;
		}
	}
}
942
int
cpu_idle_wakeup(int cpu)
{

	/* No machine-dependent wakeup mechanism; report none used. */
	return (0);
}
949
/*
 * Set up registers on exec.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	struct ps_strings arginfo;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	/* Initial user SP, 16-byte aligned with headroom for a frame. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/*
	 * XXX Machine-independent code has already copied arguments and
	 * XXX environment to userland. Get them back here.
	 */
	(void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));

	/*
	 * Set up arguments for _start():
	 * _start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 * - obj and cleanup are the auxiliary and termination
	 * vectors. They are fixed up by ld.elf_so.
	 * - ps_strings is a NetBSD extension, and will be
	 * ignored by executables which are strictly
	 * compliant with the SVR4 ABI.
	 *
	 * XXX We have to set both regs and retval here due to different
	 * XXX calling convention in trap.c and init_main.c.
	 */
	/*
	 * XXX PG: these get overwritten in the syscall return code.
	 * execve() should return EJUSTRETURN, like it does on NetBSD.
	 * Emulate by setting the syscall return value cells. The
	 * registers still have to be set for init's fork trampoline.
	 */
	td->td_retval[0] = arginfo.ps_nargvstr;
	td->td_retval[1] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[3] = arginfo.ps_nargvstr;
	tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[5] = (register_t)arginfo.ps_envstr;
	tf->fixreg[6] = 0;			/* auxiliary vector */
	tf->fixreg[7] = 0;			/* termination vector */
	tf->fixreg[8] = (register_t)PS_STRINGS;	/* NetBSD extension */

	tf->srr0 = entry;
	/* User-mode MSR: mandatory-on bits, user set, default FP mode. */
	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
	td->td_pcb->pcb_flags = 0;
}
1002
1003int
1004fill_regs(struct thread *td, struct reg *regs)
1005{
1006 struct trapframe *tf;
1007
1008 tf = td->td_frame;
1009 memcpy(regs, tf, sizeof(struct reg));
1010
1011 return (0);
1012}
1013
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC; report as unimplemented. */
	return (ENOSYS);
}
1020
1021int
1022fill_fpregs(struct thread *td, struct fpreg *fpregs)
1023{
1024 struct pcb *pcb;
1025
1026 pcb = td->td_pcb;
1027
1028 if ((pcb->pcb_flags & PCB_FPU) == 0)
1029 memset(fpregs, 0, sizeof(struct fpreg));
1030 else
1031 memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg));
1032
1033 return (0);
1034}
1035
1036int
1037set_regs(struct thread *td, struct reg *regs)
1038{
1039 struct trapframe *tf;
1040
1041 tf = td->td_frame;
1042 memcpy(tf, regs, sizeof(struct reg));
1043
1044 return (0);
1045}
1046
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC; report as unimplemented. */
	return (ENOSYS);
}
1053
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	/*
	 * Mark the thread as an FPU user first (enable_fpu() -- TODO
	 * confirm it sets PCB_FPU) so the state loaded below sticks.
	 */
	if ((pcb->pcb_flags & PCB_FPU) == 0)
		enable_fpu(td);
	memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg));

	return (0);
}
1066
1067int
1068ptrace_set_pc(struct thread *td, unsigned long addr)
1069{
1070 struct trapframe *tf;
1071
1072 tf = td->td_frame;
1073 tf->srr0 = (register_t)addr;
1074
1075 return (0);
1076}
1077
1078int
1079ptrace_single_step(struct thread *td)
1080{
1081 struct trapframe *tf;
1082
1083 tf = td->td_frame;
1084 tf->srr1 |= PSL_SE;
1085
1086 return (0);
1087}
1088
1089int
1090ptrace_clear_single_step(struct thread *td)
1091{
1092 struct trapframe *tf;
1093
1094 tf = td->td_frame;
1095 tf->srr1 &= ~PSL_SE;
1096
1097 return (0);
1098}
1099
void
kdb_cpu_clear_singlestep(void)
{

	/* Clear PSL_SE (single-step trace) in the debugger's saved frame. */
	kdb_frame->srr1 &= ~PSL_SE;
}
1106
void
kdb_cpu_set_singlestep(void)
{

	/* Set PSL_SE (single-step trace) in the debugger's saved frame. */
	kdb_frame->srr1 |= PSL_SE;
}
1113
1114/*
1115 * Initialise a struct pcpu.
1116 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{

	/* No machine-dependent pcpu fields to initialize on AIM. */
}
1122
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	/*
	 * On the outermost acquisition, disable interrupts and stash
	 * the prior MSR so spinlock_exit() can restore it when the
	 * nesting count drops back to zero.
	 */
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_msr = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}
1134
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	/* Mirror of spinlock_enter(): pop one nesting level. */
	critical_exit();
	td->td_md.md_spinlock_count--;
	/* Restore the saved MSR only when the last level is released. */
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_msr);
}
1146
1147/*
1148 * kcopy(const void *src, void *dst, size_t len);
1149 *
1150 * Copy len bytes from src to dst, aborting if we encounter a fatal
1151 * page fault.
1152 *
1153 * kcopy() _must_ save and restore the old fault handler since it is
1154 * called by uiomove(), which may be in the path of servicing a non-fatal
1155 * page fault.
1156 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct thread *td;
	faultbuf env, *oldfault;
	int rv;

	td = PCPU_GET(curthread);
	/* Save any existing onfault handler (e.g. set by uiomove()). */
	oldfault = td->td_pcb->pcb_onfault;
	/* setfault() arms pcb_onfault; returns non-zero when a fault hit. */
	if ((rv = setfault(env)) != 0) {
		td->td_pcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	/* Restore the previous handler on the success path as well. */
	td->td_pcb->pcb_onfault = oldfault;
	return (0);
}
1176
void
asm_panic(char *pstr)
{
	/* C trampoline so assembly code can call panic(9). */
	panic(pstr);
}
1182
1183int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
1184
int
db_trap_glue(struct trapframe *frame)
{
	/*
	 * Hand the trap to the debugger only for kernel-mode (PSL_PR
	 * clear) traps of interest: trace, run-mode trace, program
	 * exceptions raised by a trap instruction (SRR1 bit 0x20000),
	 * explicit breakpoints, and DSIs.
	 */
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;
		/* Program exception from a trap instruction = breakpoint. */
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}