Deleted Added
full compact
machdep.c (259235) machdep.c (262675)
1/*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31/*-
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 *
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55 */
56
57#include <sys/cdefs.h>
1/*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31/*-
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 *
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55 */
56
57#include <sys/cdefs.h>
58__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/machdep.c 259235 2013-12-11 22:36:20Z andreast $");
58__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/machdep.c 262675 2014-03-02 02:35:46Z jhibbits $");
59
60#include "opt_compat.h"
61#include "opt_ddb.h"
62#include "opt_kstack_pages.h"
63#include "opt_platform.h"
64
65#include <sys/param.h>
66#include <sys/proc.h>
67#include <sys/systm.h>
68#include <sys/bio.h>
69#include <sys/buf.h>
70#include <sys/bus.h>
71#include <sys/cons.h>
72#include <sys/cpu.h>
73#include <sys/eventhandler.h>
74#include <sys/exec.h>
75#include <sys/imgact.h>
76#include <sys/kdb.h>
77#include <sys/kernel.h>
78#include <sys/ktr.h>
79#include <sys/linker.h>
80#include <sys/lock.h>
81#include <sys/malloc.h>
82#include <sys/mbuf.h>
83#include <sys/msgbuf.h>
84#include <sys/mutex.h>
85#include <sys/ptrace.h>
86#include <sys/reboot.h>
87#include <sys/rwlock.h>
88#include <sys/signalvar.h>
89#include <sys/syscallsubr.h>
90#include <sys/sysctl.h>
91#include <sys/sysent.h>
92#include <sys/sysproto.h>
93#include <sys/ucontext.h>
94#include <sys/uio.h>
95#include <sys/vmmeter.h>
96#include <sys/vnode.h>
97
98#include <net/netisr.h>
99
100#include <vm/vm.h>
101#include <vm/vm_extern.h>
102#include <vm/vm_kern.h>
103#include <vm/vm_page.h>
104#include <vm/vm_map.h>
105#include <vm/vm_object.h>
106#include <vm/vm_pager.h>
107
108#include <machine/altivec.h>
109#ifndef __powerpc64__
110#include <machine/bat.h>
111#endif
112#include <machine/cpu.h>
113#include <machine/elf.h>
114#include <machine/fpu.h>
115#include <machine/hid.h>
116#include <machine/kdb.h>
117#include <machine/md_var.h>
118#include <machine/metadata.h>
119#include <machine/mmuvar.h>
120#include <machine/pcb.h>
121#include <machine/reg.h>
122#include <machine/sigframe.h>
123#include <machine/spr.h>
124#include <machine/trap.h>
125#include <machine/vmparam.h>
126#include <machine/ofw_machdep.h>
127
128#include <ddb/ddb.h>
129
130#include <dev/ofw/openfirm.h>
131
132#ifdef DDB
133extern vm_offset_t ksym_start, ksym_end;
134#endif
135
136int cold = 1;
137#ifdef __powerpc64__
138extern int n_slbs;
139int cacheline_size = 128;
140#else
141int cacheline_size = 32;
142#endif
143int hw_direct_map = 1;
144
59
60#include "opt_compat.h"
61#include "opt_ddb.h"
62#include "opt_kstack_pages.h"
63#include "opt_platform.h"
64
65#include <sys/param.h>
66#include <sys/proc.h>
67#include <sys/systm.h>
68#include <sys/bio.h>
69#include <sys/buf.h>
70#include <sys/bus.h>
71#include <sys/cons.h>
72#include <sys/cpu.h>
73#include <sys/eventhandler.h>
74#include <sys/exec.h>
75#include <sys/imgact.h>
76#include <sys/kdb.h>
77#include <sys/kernel.h>
78#include <sys/ktr.h>
79#include <sys/linker.h>
80#include <sys/lock.h>
81#include <sys/malloc.h>
82#include <sys/mbuf.h>
83#include <sys/msgbuf.h>
84#include <sys/mutex.h>
85#include <sys/ptrace.h>
86#include <sys/reboot.h>
87#include <sys/rwlock.h>
88#include <sys/signalvar.h>
89#include <sys/syscallsubr.h>
90#include <sys/sysctl.h>
91#include <sys/sysent.h>
92#include <sys/sysproto.h>
93#include <sys/ucontext.h>
94#include <sys/uio.h>
95#include <sys/vmmeter.h>
96#include <sys/vnode.h>
97
98#include <net/netisr.h>
99
100#include <vm/vm.h>
101#include <vm/vm_extern.h>
102#include <vm/vm_kern.h>
103#include <vm/vm_page.h>
104#include <vm/vm_map.h>
105#include <vm/vm_object.h>
106#include <vm/vm_pager.h>
107
108#include <machine/altivec.h>
109#ifndef __powerpc64__
110#include <machine/bat.h>
111#endif
112#include <machine/cpu.h>
113#include <machine/elf.h>
114#include <machine/fpu.h>
115#include <machine/hid.h>
116#include <machine/kdb.h>
117#include <machine/md_var.h>
118#include <machine/metadata.h>
119#include <machine/mmuvar.h>
120#include <machine/pcb.h>
121#include <machine/reg.h>
122#include <machine/sigframe.h>
123#include <machine/spr.h>
124#include <machine/trap.h>
125#include <machine/vmparam.h>
126#include <machine/ofw_machdep.h>
127
128#include <ddb/ddb.h>
129
130#include <dev/ofw/openfirm.h>
131
132#ifdef DDB
133extern vm_offset_t ksym_start, ksym_end;
134#endif
135
136int cold = 1;
137#ifdef __powerpc64__
138extern int n_slbs;
139int cacheline_size = 128;
140#else
141int cacheline_size = 32;
142#endif
143int hw_direct_map = 1;
144
145extern void *ap_pcpu;
146
145struct pcpu __pcpu[MAXCPU];
146
147static struct trapframe frame0;
148
149char machine[] = "powerpc";
150SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
151
152static void cpu_startup(void *);
153SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
154
155SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
156 CTLFLAG_RD, &cacheline_size, 0, "");
157
158uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);
159
160int setfault(faultbuf); /* defined in locore.S */
161
162long Maxmem = 0;
163long realmem = 0;
164
165#ifndef __powerpc64__
166struct bat battable[16];
167#endif
168
169struct kva_md_info kmi;
170
171static void
172cpu_startup(void *dummy)
173{
174
175 /*
176 * Initialise the decrementer-based clock.
177 */
178 decr_init();
179
180 /*
181 * Good {morning,afternoon,evening,night}.
182 */
183 cpu_setup(PCPU_GET(cpuid));
184
185#ifdef PERFMON
186 perfmon_init();
187#endif
188 printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
189 ptoa(physmem) / 1048576);
190 realmem = physmem;
191
192 if (bootverbose)
193 printf("available KVA = %zd (%zd MB)\n",
194 virtual_end - virtual_avail,
195 (virtual_end - virtual_avail) / 1048576);
196
197 /*
198 * Display any holes after the first chunk of extended memory.
199 */
200 if (bootverbose) {
201 int indx;
202
203 printf("Physical memory chunk(s):\n");
204 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
205 vm_offset_t size1 =
206 phys_avail[indx + 1] - phys_avail[indx];
207
208 #ifdef __powerpc64__
209 printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n",
210 #else
211 printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
212 #endif
213 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
214 size1 / PAGE_SIZE);
215 }
216 }
217
218 vm_ksubmap_init(&kmi);
219
220 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
221 ptoa(cnt.v_free_count) / 1048576);
222
223 /*
224 * Set up buffers, so they can be used to read disk labels.
225 */
226 bufinit();
227 vm_pager_bufferinit();
228}
229
230extern char kernel_text[], _end[];
231
232#ifndef __powerpc64__
233/* Bits for running on 64-bit systems in 32-bit mode. */
234extern void *testppc64, *testppc64size;
235extern void *restorebridge, *restorebridgesize;
236extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
237extern void *trapcode64;
238#endif
239
147struct pcpu __pcpu[MAXCPU];
148
149static struct trapframe frame0;
150
151char machine[] = "powerpc";
152SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
153
154static void cpu_startup(void *);
155SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
156
157SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
158 CTLFLAG_RD, &cacheline_size, 0, "");
159
160uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);
161
162int setfault(faultbuf); /* defined in locore.S */
163
164long Maxmem = 0;
165long realmem = 0;
166
167#ifndef __powerpc64__
168struct bat battable[16];
169#endif
170
171struct kva_md_info kmi;
172
173static void
174cpu_startup(void *dummy)
175{
176
177 /*
178 * Initialise the decrementer-based clock.
179 */
180 decr_init();
181
182 /*
183 * Good {morning,afternoon,evening,night}.
184 */
185 cpu_setup(PCPU_GET(cpuid));
186
187#ifdef PERFMON
188 perfmon_init();
189#endif
190 printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
191 ptoa(physmem) / 1048576);
192 realmem = physmem;
193
194 if (bootverbose)
195 printf("available KVA = %zd (%zd MB)\n",
196 virtual_end - virtual_avail,
197 (virtual_end - virtual_avail) / 1048576);
198
199 /*
200 * Display any holes after the first chunk of extended memory.
201 */
202 if (bootverbose) {
203 int indx;
204
205 printf("Physical memory chunk(s):\n");
206 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
207 vm_offset_t size1 =
208 phys_avail[indx + 1] - phys_avail[indx];
209
210 #ifdef __powerpc64__
211 printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n",
212 #else
213 printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
214 #endif
215 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
216 size1 / PAGE_SIZE);
217 }
218 }
219
220 vm_ksubmap_init(&kmi);
221
222 printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
223 ptoa(cnt.v_free_count) / 1048576);
224
225 /*
226 * Set up buffers, so they can be used to read disk labels.
227 */
228 bufinit();
229 vm_pager_bufferinit();
230}
231
232extern char kernel_text[], _end[];
233
234#ifndef __powerpc64__
235/* Bits for running on 64-bit systems in 32-bit mode. */
236extern void *testppc64, *testppc64size;
237extern void *restorebridge, *restorebridgesize;
238extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
239extern void *trapcode64;
240#endif
241
240#ifdef SMP
241extern void *rstcode, *rstsize;
242extern void *rstcode, *rstsize;
242#endif
243extern void *trapcode, *trapsize;
244extern void *slbtrap, *slbtrapsize;
245extern void *alitrap, *alisize;
246extern void *dsitrap, *dsisize;
247extern void *decrint, *decrsize;
248extern void *extint, *extsize;
249extern void *dblow, *dbsize;
250extern void *imisstrap, *imisssize;
251extern void *dlmisstrap, *dlmisssize;
252extern void *dsmisstrap, *dsmisssize;
253char save_trap_init[0x2f00]; /* EXC_LAST */
254
255uintptr_t
256powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
257 vm_offset_t basekernel, void *mdp)
258{
259 struct pcpu *pc;
260 void *generictrap;
261 size_t trap_offset;
262 void *kmdp;
263 char *env;
264 register_t msr, scratch;
265#ifdef WII
266 register_t vers;
267#endif
268 uint8_t *cache_check;
269 int cacheline_warn;
270 #ifndef __powerpc64__
271 int ppc64;
272 #endif
273
274 kmdp = NULL;
275 trap_offset = 0;
276 cacheline_warn = 0;
277
278 /* Save trap vectors. */
279 ofw_save_trap_vec(save_trap_init);
280
281#ifdef WII
282 /*
283 * The Wii loader doesn't pass us any environment so, mdp
284 * points to garbage at this point. The Wii CPU is a 750CL.
285 */
286 vers = mfpvr();
287 if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
288 mdp = NULL;
289#endif
290
291 /*
292 * Parse metadata if present and fetch parameters. Must be done
293 * before console is inited so cninit gets the right value of
294 * boothowto.
295 */
296 if (mdp != NULL) {
297 preload_metadata = mdp;
298 kmdp = preload_search_by_type("elf kernel");
299 if (kmdp != NULL) {
300 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
301 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
302 endkernel = ulmax(endkernel, MD_FETCH(kmdp,
303 MODINFOMD_KERNEND, vm_offset_t));
304#ifdef DDB
305 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
306 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
307#endif
308 }
309 }
310
311 /*
312 * Init params/tunables that can be overridden by the loader
313 */
314 init_param1();
315
316 /*
317 * Start initializing proc0 and thread0.
318 */
319 proc_linkup0(&proc0, &thread0);
320 thread0.td_frame = &frame0;
321
322 /*
323 * Set up per-cpu data.
324 */
325 pc = __pcpu;
326 pcpu_init(pc, 0, sizeof(struct pcpu));
327 pc->pc_curthread = &thread0;
328#ifdef __powerpc64__
329 __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread));
330#else
331 __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread));
332#endif
333 pc->pc_cpuid = 0;
334
335 __asm __volatile("mtsprg 0, %0" :: "r"(pc));
336
337 /*
338 * Init mutexes, which we use heavily in PMAP
339 */
340
341 mutex_init();
342
343 /*
344 * Install the OF client interface
345 */
346
347 OF_bootstrap();
348
349 /*
350 * Initialize the console before printing anything.
351 */
352 cninit();
353
354 /*
355 * Complain if there is no metadata.
356 */
357 if (mdp == NULL || kmdp == NULL) {
358 printf("powerpc_init: no loader metadata.\n");
359 }
360
361 /*
362 * Init KDB
363 */
364
365 kdb_init();
366
367 /* Various very early CPU fix ups */
368 switch (mfpvr() >> 16) {
369 /*
370 * PowerPC 970 CPUs have a misfeature requested by Apple that
371 * makes them pretend they have a 32-byte cacheline. Turn this
372 * off before we measure the cacheline size.
373 */
374 case IBM970:
375 case IBM970FX:
376 case IBM970MP:
377 case IBM970GX:
378 scratch = mfspr(SPR_HID5);
379 scratch &= ~HID5_970_DCBZ_SIZE_HI;
380 mtspr(SPR_HID5, scratch);
381 break;
382 #ifdef __powerpc64__
383 case IBMPOWER7:
384 /* XXX: get from ibm,slb-size in device tree */
385 n_slbs = 32;
386 break;
387 #endif
388 }
389
390 /*
391 * Initialize the interrupt tables and figure out our cache line
392 * size and whether or not we need the 64-bit bridge code.
393 */
394
395 /*
396 * Disable translation in case the vector area hasn't been
397 * mapped (G5). Note that no OFW calls can be made until
398 * translation is re-enabled.
399 */
400
401 msr = mfmsr();
402 mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
403
404 /*
405 * Measure the cacheline size using dcbz
406 *
407 * Use EXC_PGM as a playground. We are about to overwrite it
408 * anyway, we know it exists, and we know it is cache-aligned.
409 */
410
411 cache_check = (void *)EXC_PGM;
412
413 for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
414 cache_check[cacheline_size] = 0xff;
415
416 __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");
417
418 /* Find the first byte dcbz did not zero to get the cache line size */
419 for (cacheline_size = 0; cacheline_size < 0x100 &&
420 cache_check[cacheline_size] == 0; cacheline_size++);
421
422 /* Work around psim bug */
423 if (cacheline_size == 0) {
424 cacheline_warn = 1;
425 cacheline_size = 32;
426 }
427
428 /* Make sure the kernel icache is valid before we go too much further */
429 __syncicache((caddr_t)startkernel, endkernel - startkernel);
430
431 #ifndef __powerpc64__
432 /*
433 * Figure out whether we need to use the 64 bit PMAP. This works by
434 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
435 * and setting ppc64 = 0 if that causes a trap.
436 */
437
438 ppc64 = 1;
439
440 bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
441 __syncicache((void *)EXC_PGM, (size_t)&testppc64size);
442
443 __asm __volatile("\
444 mfmsr %0; \
445 mtsprg2 %1; \
446 \
447 mtmsrd %0; \
448 mfsprg2 %1;"
449 : "=r"(scratch), "=r"(ppc64));
450
451 if (ppc64)
452 cpu_features |= PPC_FEATURE_64;
453
454 /*
455 * Now copy restorebridge into all the handlers, if necessary,
456 * and set up the trap tables.
457 */
458
459 if (cpu_features & PPC_FEATURE_64) {
460 /* Patch the two instances of rfi -> rfid */
461 bcopy(&rfid_patch,&rfi_patch1,4);
462 #ifdef KDB
463 /* rfi_patch2 is at the end of dbleave */
464 bcopy(&rfid_patch,&rfi_patch2,4);
465 #endif
466
467 /*
468 * Copy a code snippet to restore 32-bit bridge mode
469 * to the top of every non-generic trap handler
470 */
471
472 trap_offset += (size_t)&restorebridgesize;
473 bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
474 bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
475 bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
476 bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
477 bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
478 bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
479 bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
480
481 /*
482 * Set the common trap entry point to the one that
483 * knows to restore 32-bit operation on execution.
484 */
485
486 generictrap = &trapcode64;
487 } else {
488 generictrap = &trapcode;
489 }
490
491 #else /* powerpc64 */
492 cpu_features |= PPC_FEATURE_64;
493 generictrap = &trapcode;
494 #endif
495
243extern void *trapcode, *trapsize;
244extern void *slbtrap, *slbtrapsize;
245extern void *alitrap, *alisize;
246extern void *dsitrap, *dsisize;
247extern void *decrint, *decrsize;
248extern void *extint, *extsize;
249extern void *dblow, *dbsize;
250extern void *imisstrap, *imisssize;
251extern void *dlmisstrap, *dlmisssize;
252extern void *dsmisstrap, *dsmisssize;
253char save_trap_init[0x2f00]; /* EXC_LAST */
254
255uintptr_t
256powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
257 vm_offset_t basekernel, void *mdp)
258{
259 struct pcpu *pc;
260 void *generictrap;
261 size_t trap_offset;
262 void *kmdp;
263 char *env;
264 register_t msr, scratch;
265#ifdef WII
266 register_t vers;
267#endif
268 uint8_t *cache_check;
269 int cacheline_warn;
270 #ifndef __powerpc64__
271 int ppc64;
272 #endif
273
274 kmdp = NULL;
275 trap_offset = 0;
276 cacheline_warn = 0;
277
278 /* Save trap vectors. */
279 ofw_save_trap_vec(save_trap_init);
280
281#ifdef WII
282 /*
283 * The Wii loader doesn't pass us any environment so, mdp
284 * points to garbage at this point. The Wii CPU is a 750CL.
285 */
286 vers = mfpvr();
287 if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
288 mdp = NULL;
289#endif
290
291 /*
292 * Parse metadata if present and fetch parameters. Must be done
293 * before console is inited so cninit gets the right value of
294 * boothowto.
295 */
296 if (mdp != NULL) {
297 preload_metadata = mdp;
298 kmdp = preload_search_by_type("elf kernel");
299 if (kmdp != NULL) {
300 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
301 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
302 endkernel = ulmax(endkernel, MD_FETCH(kmdp,
303 MODINFOMD_KERNEND, vm_offset_t));
304#ifdef DDB
305 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
306 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
307#endif
308 }
309 }
310
311 /*
312 * Init params/tunables that can be overridden by the loader
313 */
314 init_param1();
315
316 /*
317 * Start initializing proc0 and thread0.
318 */
319 proc_linkup0(&proc0, &thread0);
320 thread0.td_frame = &frame0;
321
322 /*
323 * Set up per-cpu data.
324 */
325 pc = __pcpu;
326 pcpu_init(pc, 0, sizeof(struct pcpu));
327 pc->pc_curthread = &thread0;
328#ifdef __powerpc64__
329 __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread));
330#else
331 __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread));
332#endif
333 pc->pc_cpuid = 0;
334
335 __asm __volatile("mtsprg 0, %0" :: "r"(pc));
336
337 /*
338 * Init mutexes, which we use heavily in PMAP
339 */
340
341 mutex_init();
342
343 /*
344 * Install the OF client interface
345 */
346
347 OF_bootstrap();
348
349 /*
350 * Initialize the console before printing anything.
351 */
352 cninit();
353
354 /*
355 * Complain if there is no metadata.
356 */
357 if (mdp == NULL || kmdp == NULL) {
358 printf("powerpc_init: no loader metadata.\n");
359 }
360
361 /*
362 * Init KDB
363 */
364
365 kdb_init();
366
367 /* Various very early CPU fix ups */
368 switch (mfpvr() >> 16) {
369 /*
370 * PowerPC 970 CPUs have a misfeature requested by Apple that
371 * makes them pretend they have a 32-byte cacheline. Turn this
372 * off before we measure the cacheline size.
373 */
374 case IBM970:
375 case IBM970FX:
376 case IBM970MP:
377 case IBM970GX:
378 scratch = mfspr(SPR_HID5);
379 scratch &= ~HID5_970_DCBZ_SIZE_HI;
380 mtspr(SPR_HID5, scratch);
381 break;
382 #ifdef __powerpc64__
383 case IBMPOWER7:
384 /* XXX: get from ibm,slb-size in device tree */
385 n_slbs = 32;
386 break;
387 #endif
388 }
389
390 /*
391 * Initialize the interrupt tables and figure out our cache line
392 * size and whether or not we need the 64-bit bridge code.
393 */
394
395 /*
396 * Disable translation in case the vector area hasn't been
397 * mapped (G5). Note that no OFW calls can be made until
398 * translation is re-enabled.
399 */
400
401 msr = mfmsr();
402 mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
403
404 /*
405 * Measure the cacheline size using dcbz
406 *
407 * Use EXC_PGM as a playground. We are about to overwrite it
408 * anyway, we know it exists, and we know it is cache-aligned.
409 */
410
411 cache_check = (void *)EXC_PGM;
412
413 for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
414 cache_check[cacheline_size] = 0xff;
415
416 __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");
417
418 /* Find the first byte dcbz did not zero to get the cache line size */
419 for (cacheline_size = 0; cacheline_size < 0x100 &&
420 cache_check[cacheline_size] == 0; cacheline_size++);
421
422 /* Work around psim bug */
423 if (cacheline_size == 0) {
424 cacheline_warn = 1;
425 cacheline_size = 32;
426 }
427
428 /* Make sure the kernel icache is valid before we go too much further */
429 __syncicache((caddr_t)startkernel, endkernel - startkernel);
430
431 #ifndef __powerpc64__
432 /*
433 * Figure out whether we need to use the 64 bit PMAP. This works by
434 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
435 * and setting ppc64 = 0 if that causes a trap.
436 */
437
438 ppc64 = 1;
439
440 bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
441 __syncicache((void *)EXC_PGM, (size_t)&testppc64size);
442
443 __asm __volatile("\
444 mfmsr %0; \
445 mtsprg2 %1; \
446 \
447 mtmsrd %0; \
448 mfsprg2 %1;"
449 : "=r"(scratch), "=r"(ppc64));
450
451 if (ppc64)
452 cpu_features |= PPC_FEATURE_64;
453
454 /*
455 * Now copy restorebridge into all the handlers, if necessary,
456 * and set up the trap tables.
457 */
458
459 if (cpu_features & PPC_FEATURE_64) {
460 /* Patch the two instances of rfi -> rfid */
461 bcopy(&rfid_patch,&rfi_patch1,4);
462 #ifdef KDB
463 /* rfi_patch2 is at the end of dbleave */
464 bcopy(&rfid_patch,&rfi_patch2,4);
465 #endif
466
467 /*
468 * Copy a code snippet to restore 32-bit bridge mode
469 * to the top of every non-generic trap handler
470 */
471
472 trap_offset += (size_t)&restorebridgesize;
473 bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
474 bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
475 bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
476 bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
477 bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
478 bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
479 bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
480
481 /*
482 * Set the common trap entry point to the one that
483 * knows to restore 32-bit operation on execution.
484 */
485
486 generictrap = &trapcode64;
487 } else {
488 generictrap = &trapcode;
489 }
490
491 #else /* powerpc64 */
492 cpu_features |= PPC_FEATURE_64;
493 generictrap = &trapcode;
494 #endif
495
496#ifdef SMP
497 bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);
496 bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);
498#else
499 bcopy(generictrap, (void *)EXC_RST, (size_t)&trapsize);
500#endif
501
502#ifdef KDB
503 bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
504 bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
505 bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
506 bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
507#else
508 bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
509 bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
510 bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
511 bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
512#endif
513 bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
514 bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
515 bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
516 #ifdef __powerpc64__
517 bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapsize);
518 bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapsize);
519 #endif
520 bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
521 bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
522 bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
523 bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
524 bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
525 bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
526 bcopy(generictrap, (void *)EXC_PERF, (size_t)&trapsize);
527 bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
528 bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
529 #ifndef __powerpc64__
530 /* G2-specific TLB miss helper handlers */
531 bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
532 bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
533 bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
534 #endif
535 __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
536
537 /*
538 * Restore MSR
539 */
540 mtmsr(msr);
541
542 /* Warn if cachline size was not determined */
543 if (cacheline_warn == 1) {
544 printf("WARNING: cacheline size undetermined, setting to 32\n");
545 }
546
547 /*
548 * Choose a platform module so we can get the physical memory map.
549 */
550
551 platform_probe_and_attach();
552
553 /*
554 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
555 * in case the platform module had a better idea of what we
556 * should do.
557 */
558 if (cpu_features & PPC_FEATURE_64)
559 pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
560 else
561 pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
562
563 pmap_bootstrap(startkernel, endkernel);
564 mtmsr(PSL_KERNSET & ~PSL_EE);
565
566 /*
567 * Initialize params/tunables that are derived from memsize
568 */
569 init_param2(physmem);
570
571 /*
572 * Grab booted kernel's name
573 */
574 env = getenv("kernelname");
575 if (env != NULL) {
576 strlcpy(kernelname, env, sizeof(kernelname));
577 freeenv(env);
578 }
579
580 /*
581 * Finish setting up thread0.
582 */
583 thread0.td_pcb = (struct pcb *)
584 ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
585 sizeof(struct pcb)) & ~15UL);
586 bzero((void *)thread0.td_pcb, sizeof(struct pcb));
587 pc->pc_curpcb = thread0.td_pcb;
588
589 /* Initialise the message buffer. */
590 msgbufinit(msgbufp, msgbufsize);
591
592#ifdef KDB
593 if (boothowto & RB_KDB)
594 kdb_enter(KDB_WHY_BOOTFLAGS,
595 "Boot flags requested debugger");
596#endif
597
598 return (((uintptr_t)thread0.td_pcb -
599 (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
600}
601
/*
 * Zero a region of memory.
 *
 * Byte-zeroes up to a word boundary, then clears eight words per
 * iteration, then single words, and finally any trailing bytes.
 */
void
bzero(void *buf, size_t len)
{
	char *cp = buf;

	/* Zero leading bytes until cp is word-aligned (or len runs out). */
	for (; len > 0 && ((uintptr_t)cp & (sizeof(unsigned long) - 1)) != 0;
	    len--)
		*cp++ = 0;

	/* Main loop: clear eight words per iteration. */
	for (; len >= 8 * sizeof(unsigned long);
	    len -= 8 * sizeof(unsigned long)) {
		unsigned long *wp = (unsigned long *)cp;

		wp[0] = 0;
		wp[1] = 0;
		wp[2] = 0;
		wp[3] = 0;
		wp[4] = 0;
		wp[5] = 0;
		wp[6] = 0;
		wp[7] = 0;
		cp += 8 * sizeof(unsigned long);
	}

	/* Clear remaining whole words. */
	for (; len >= sizeof(unsigned long); len -= sizeof(unsigned long)) {
		*(unsigned long *)cp = 0;
		cp += sizeof(unsigned long);
	}

	/* Zero any trailing bytes. */
	for (; len > 0; len--)
		*cp++ = 0;
}
638
/*
 * Machine-dependent reboot hook; nothing to do on this platform.
 */
void
cpu_boot(int howto)
{
}
643
644/*
645 * Flush the D-cache for non-DMA I/O so that the I-cache can
646 * be made coherent later.
647 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* TBD */
	/* NOTE(review): unimplemented stub — callers currently get no flush. */
}
653
/*
 * Start the kernel clocks: register the decrementer as a timecounter,
 * then perform the boot-processor clock configuration.
 */
void
cpu_initclocks(void)
{

	decr_tc_init();
	cpu_initclocks_bsp();
}
661
662/*
663 * Shutdown the CPU as much as possible.
664 */
void
cpu_halt(void)
{

	/* Hand control back to Open Firmware. */
	OF_exit();
}
671
672int
673ptrace_set_pc(struct thread *td, unsigned long addr)
674{
675 struct trapframe *tf;
676
677 tf = td->td_frame;
678 tf->srr0 = (register_t)addr;
679
680 return (0);
681}
682
683int
684ptrace_single_step(struct thread *td)
685{
686 struct trapframe *tf;
687
688 tf = td->td_frame;
689 tf->srr1 |= PSL_SE;
690
691 return (0);
692}
693
694int
695ptrace_clear_single_step(struct thread *td)
696{
697 struct trapframe *tf;
698
699 tf = td->td_frame;
700 tf->srr1 &= ~PSL_SE;
701
702 return (0);
703}
704
void
kdb_cpu_clear_singlestep(void)
{

	/* Clear single-step enable in the debugger's saved MSR image. */
	kdb_frame->srr1 &= ~PSL_SE;
}
711
void
kdb_cpu_set_singlestep(void)
{

	/* Set single-step enable in the debugger's saved MSR image. */
	kdb_frame->srr1 |= PSL_SE;
}
718
719/*
720 * Initialise a struct pcpu.
721 */
722void
723cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
724{
725#ifdef __powerpc64__
726/* Copy the SLB contents from the current CPU */
727memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
728#endif
729}
730
/*
 * Enter a spin-lock section.  On the outermost entry, disable external
 * interrupts and stash the previous MSR so spinlock_exit() can restore
 * it; nested entries only bump the per-thread count.  A critical
 * section is always entered so the thread cannot be preempted while
 * holding a spin lock.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Outermost entry: interrupts go off before the count is set. */
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
746
/*
 * Leave a spin-lock section; mirror image of spinlock_enter().  The
 * saved MSR is re-applied only when the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	critical_exit();
	/* Snapshot the saved MSR before dropping the count. */
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(msr);
}
760
761int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
762
763int
764db_trap_glue(struct trapframe *frame)
765{
766 if (!(frame->srr1 & PSL_PR)
767 && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
768 || (frame->exc == EXC_PGM
769 && (frame->srr1 & 0x20000))
770 || frame->exc == EXC_BPT
771 || frame->exc == EXC_DSI)) {
772 int type = frame->exc;
773 if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
774 type = T_BREAKPOINT;
775 }
776 return (kdb_trap(type, 0, frame));
777 }
778
779 return (0);
780}
781
782#ifndef __powerpc64__
783
/*
 * Return the VSID for a virtual address: index the pmap's segment
 * register array by the top address bits and mask off the VSID field.
 */
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
789
790#endif
497
498#ifdef KDB
499 bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
500 bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
501 bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
502 bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
503#else
504 bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
505 bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
506 bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
507 bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
508#endif
509 bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
510 bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
511 bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
512 #ifdef __powerpc64__
513 bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapsize);
514 bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapsize);
515 #endif
516 bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
517 bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
518 bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
519 bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
520 bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
521 bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
522 bcopy(generictrap, (void *)EXC_PERF, (size_t)&trapsize);
523 bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
524 bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
525 #ifndef __powerpc64__
526 /* G2-specific TLB miss helper handlers */
527 bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
528 bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
529 bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
530 #endif
531 __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
532
533 /*
534 * Restore MSR
535 */
536 mtmsr(msr);
537
538 /* Warn if cachline size was not determined */
539 if (cacheline_warn == 1) {
540 printf("WARNING: cacheline size undetermined, setting to 32\n");
541 }
542
543 /*
544 * Choose a platform module so we can get the physical memory map.
545 */
546
547 platform_probe_and_attach();
548
549 /*
550 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
551 * in case the platform module had a better idea of what we
552 * should do.
553 */
554 if (cpu_features & PPC_FEATURE_64)
555 pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
556 else
557 pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
558
559 pmap_bootstrap(startkernel, endkernel);
560 mtmsr(PSL_KERNSET & ~PSL_EE);
561
562 /*
563 * Initialize params/tunables that are derived from memsize
564 */
565 init_param2(physmem);
566
567 /*
568 * Grab booted kernel's name
569 */
570 env = getenv("kernelname");
571 if (env != NULL) {
572 strlcpy(kernelname, env, sizeof(kernelname));
573 freeenv(env);
574 }
575
576 /*
577 * Finish setting up thread0.
578 */
579 thread0.td_pcb = (struct pcb *)
580 ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
581 sizeof(struct pcb)) & ~15UL);
582 bzero((void *)thread0.td_pcb, sizeof(struct pcb));
583 pc->pc_curpcb = thread0.td_pcb;
584
585 /* Initialise the message buffer. */
586 msgbufinit(msgbufp, msgbufsize);
587
588#ifdef KDB
589 if (boothowto & RB_KDB)
590 kdb_enter(KDB_WHY_BOOTFLAGS,
591 "Boot flags requested debugger");
592#endif
593
594 return (((uintptr_t)thread0.td_pcb -
595 (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
596}
597
598void
599bzero(void *buf, size_t len)
600{
601 caddr_t p;
602
603 p = buf;
604
605 while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
606 *p++ = 0;
607 len--;
608 }
609
610 while (len >= sizeof(u_long) * 8) {
611 *(u_long*) p = 0;
612 *((u_long*) p + 1) = 0;
613 *((u_long*) p + 2) = 0;
614 *((u_long*) p + 3) = 0;
615 len -= sizeof(u_long) * 8;
616 *((u_long*) p + 4) = 0;
617 *((u_long*) p + 5) = 0;
618 *((u_long*) p + 6) = 0;
619 *((u_long*) p + 7) = 0;
620 p += sizeof(u_long) * 8;
621 }
622
623 while (len >= sizeof(u_long)) {
624 *(u_long*) p = 0;
625 len -= sizeof(u_long);
626 p += sizeof(u_long);
627 }
628
629 while (len) {
630 *p++ = 0;
631 len--;
632 }
633}
634
635void
636cpu_boot(int howto)
637{
638}
639
640/*
641 * Flush the D-cache for non-DMA I/O so that the I-cache can
642 * be made coherent later.
643 */
644void
645cpu_flush_dcache(void *ptr, size_t len)
646{
647 /* TBD */
648}
649
650void
651cpu_initclocks(void)
652{
653
654 decr_tc_init();
655 cpu_initclocks_bsp();
656}
657
658/*
659 * Shutdown the CPU as much as possible.
660 */
661void
662cpu_halt(void)
663{
664
665 OF_exit();
666}
667
668int
669ptrace_set_pc(struct thread *td, unsigned long addr)
670{
671 struct trapframe *tf;
672
673 tf = td->td_frame;
674 tf->srr0 = (register_t)addr;
675
676 return (0);
677}
678
679int
680ptrace_single_step(struct thread *td)
681{
682 struct trapframe *tf;
683
684 tf = td->td_frame;
685 tf->srr1 |= PSL_SE;
686
687 return (0);
688}
689
690int
691ptrace_clear_single_step(struct thread *td)
692{
693 struct trapframe *tf;
694
695 tf = td->td_frame;
696 tf->srr1 &= ~PSL_SE;
697
698 return (0);
699}
700
701void
702kdb_cpu_clear_singlestep(void)
703{
704
705 kdb_frame->srr1 &= ~PSL_SE;
706}
707
708void
709kdb_cpu_set_singlestep(void)
710{
711
712 kdb_frame->srr1 |= PSL_SE;
713}
714
715/*
716 * Initialise a struct pcpu.
717 */
718void
719cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
720{
721#ifdef __powerpc64__
722/* Copy the SLB contents from the current CPU */
723memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
724#endif
725}
726
727void
728spinlock_enter(void)
729{
730 struct thread *td;
731 register_t msr;
732
733 td = curthread;
734 if (td->td_md.md_spinlock_count == 0) {
735 msr = intr_disable();
736 td->td_md.md_spinlock_count = 1;
737 td->td_md.md_saved_msr = msr;
738 } else
739 td->td_md.md_spinlock_count++;
740 critical_enter();
741}
742
743void
744spinlock_exit(void)
745{
746 struct thread *td;
747 register_t msr;
748
749 td = curthread;
750 critical_exit();
751 msr = td->td_md.md_saved_msr;
752 td->td_md.md_spinlock_count--;
753 if (td->td_md.md_spinlock_count == 0)
754 intr_restore(msr);
755}
756
757int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
758
759int
760db_trap_glue(struct trapframe *frame)
761{
762 if (!(frame->srr1 & PSL_PR)
763 && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
764 || (frame->exc == EXC_PGM
765 && (frame->srr1 & 0x20000))
766 || frame->exc == EXC_BPT
767 || frame->exc == EXC_DSI)) {
768 int type = frame->exc;
769 if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
770 type = T_BREAKPOINT;
771 }
772 return (kdb_trap(type, 0, frame));
773 }
774
775 return (0);
776}
777
778#ifndef __powerpc64__
779
780uint64_t
781va_to_vsid(pmap_t pm, vm_offset_t va)
782{
783 return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
784}
785
786#endif
787
788/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
/*
 * Flush and then disable the data caches (L1, and L2/L3 if enabled),
 * following the MPC7450 manual's sequence.  Called on the suspend path
 * (see cpu_sleep()) before the core is put to sleep.  Runs with
 * interrupts and data translation disabled; the entry MSR is restored
 * on return.
 */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	/* Turn off external interrupts and data address translation. */
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	/* Disable L2 prefetching while the caches are being torn down. */
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	/* Stop all AltiVec data streams before flushing. */
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then do it
	 * from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		/* Load the line, then flush it back out of the cache. */
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);	/* advance one 32-byte line */
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	/* Walk the LDSTCR lock mask (0xfe, 0xfd, ... ) so every way is hit. */
	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	/* If the L2 is enabled: hardware-flush, disable, then invalidate it. */
	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	/* Same flush/disable/invalidate sequence for the L3, if enabled. */
	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	/* Finally, turn the L1 data cache off. */
	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}
891
/*
 * Put the CPU into SLEEP state.  Saves FPU/AltiVec and key special
 * registers, flushes and disables the caches, then sets MSR[POW] to
 * enter sleep.  On wakeup, control returns here through the setjmp
 * buffer registered in the per-CPU "restore" field (NOTE(review):
 * presumably the reset/wakeup path longjmp()s through it — confirm in
 * the platform wakeup code), after which all saved state is restored.
 */
void
cpu_sleep()
{
	/* Static: automatic storage does not survive the sleep/reset cycle. */
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	/* Publish the wakeup continuation for the resume path. */
	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	/* Push any live FPU/AltiVec state out to the owning threads. */
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		/* Going down: save SPRGs, SRRs and the timebase. */
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		/* Select SLEEP (not DOZE/NAP) in HID0. */
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		/* Setting MSR[POW] enters sleep; loop re-enters on wakeup. */
		while (1)
			mtmsr(msr);
	}
	/* Woke up (returned via the setjmp buffer): restore saved state. */
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	/* Re-enable FPU/AltiVec if the current thread owned them. */
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}