aim_machdep.c (277334)
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/machdep.c 277334 2015-01-18 18:32:43Z nwhitehorn $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

int cold = 1;
#ifdef __powerpc64__
extern int n_slbs;
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
int hw_direct_map = 1;

extern void *ap_pcpu;

struct pcpu __pcpu[MAXCPU];

static struct trapframe frame0;

char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
    CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);

long Maxmem = 0;
long realmem = 0;

#ifndef __powerpc64__
struct bat battable[16];
#endif

struct kva_md_info kmi;

static void
cpu_startup(void *dummy)
{

        /*
         * Initialise the decrementer-based clock.
         */
        decr_init();

        /*
         * Good {morning,afternoon,evening,night}.
         */
        cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
        perfmon_init();
#endif
        printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
            ptoa(physmem) / 1048576);
        realmem = physmem;

        if (bootverbose)
                printf("available KVA = %zd (%zd MB)\n",
                    virtual_end - virtual_avail,
                    (virtual_end - virtual_avail) / 1048576);

        /*
         * Display any holes after the first chunk of extended memory.
         */
        if (bootverbose) {
                int indx;

                printf("Physical memory chunk(s):\n");
                for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
                        vm_offset_t size1 =
                            phys_avail[indx + 1] - phys_avail[indx];

                        #ifdef __powerpc64__
                        printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n",
                        #else
                        printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
                        #endif
                            phys_avail[indx], phys_avail[indx + 1] - 1, size1,
                            size1 / PAGE_SIZE);
                }
        }

        vm_ksubmap_init(&kmi);

        printf("avail memory = %ld (%ld MB)\n", ptoa(vm_cnt.v_free_count),
            ptoa(vm_cnt.v_free_count) / 1048576);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();
        vm_pager_bufferinit();
}

extern vm_offset_t __startkernel, __endkernel;

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;
#endif

extern void *rstcode, *rstsize;
extern void *trapcode, *trapsize;
extern void *slbtrap, *slbtrapsize;
extern void *alitrap, *alisize;
extern void *dsitrap, *dsisize;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbsize;
extern void *imisstrap, *imisssize;
extern void *dlmisstrap, *dlmisssize;
extern void *dsmisstrap, *dsmisssize;

uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
{
        struct pcpu *pc;
        vm_offset_t startkernel, endkernel;
        void *generictrap;
        size_t trap_offset;
        void *kmdp;
        char *env;
        register_t msr, scratch;
#ifdef WII
        register_t vers;
#endif
        uint8_t *cache_check;
        int cacheline_warn;
        #ifndef __powerpc64__
        int ppc64;
        #endif
#ifdef DDB
        vm_offset_t ksym_start;
        vm_offset_t ksym_end;
#endif

        kmdp = NULL;
        trap_offset = 0;
        cacheline_warn = 0;

        /* Store boot environment state */
        OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

        /* First guess at start/end kernel positions */
        startkernel = __startkernel;
        endkernel = __endkernel;

#ifdef WII
        /*
         * The Wii loader doesn't pass us any environment, so mdp points
         * to garbage at this point. The Wii CPU is a 750CL.
         */
        vers = mfpvr();
        if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
                mdp = NULL;
#endif

        /*
         * Parse metadata if present and fetch parameters. Must be done
         * before the console is initialized so cninit gets the right
         * value of boothowto.
         */
        if (mdp != NULL) {
                preload_metadata = mdp;
                kmdp = preload_search_by_type("elf kernel");
                if (kmdp != NULL) {
                        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
                        kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
                        endkernel = ulmax(endkernel, MD_FETCH(kmdp,
                            MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
                        ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
                        ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
                        db_fetch_ksymtab(ksym_start, ksym_end);
#endif
                }
        }

        /*
         * Init params/tunables that can be overridden by the loader
         */
        init_param1();

        /*
         * Start initializing proc0 and thread0.
         */
        proc_linkup0(&proc0, &thread0);
        thread0.td_frame = &frame0;

        /*
         * Set up per-cpu data.
         */
        pc = __pcpu;
        pcpu_init(pc, 0, sizeof(struct pcpu));
        pc->pc_curthread = &thread0;
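        /*
         * Keep curthread in the register the kernel dedicates to it
         * (r13 on 64-bit, r2 on 32-bit) so assembly code can find it.
         */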
#ifdef __powerpc64__
        __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread));
#else
        __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread));
#endif
        pc->pc_cpuid = 0;

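        /* Let the trap code find the per-CPU data through SPRG0. */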
        __asm __volatile("mtsprg 0, %0" :: "r"(pc));

        /*
         * Init mutexes, which we use heavily in PMAP
         */

        mutex_init();

        /*
         * Install the OF client interface
         */

        OF_bootstrap();

        /*
         * Initialize the console before printing anything.
         */
        cninit();

        /*
         * Complain if there is no metadata.
         */
        if (mdp == NULL || kmdp == NULL) {
                printf("powerpc_init: no loader metadata.\n");
        }

        /*
         * Init KDB
         */

        kdb_init();

        /* Various very early CPU fix ups */
        switch (mfpvr() >> 16) {
        /*
         * PowerPC 970 CPUs have a misfeature requested by Apple that
         * makes them pretend they have a 32-byte cacheline. Turn this
         * off before we measure the cacheline size.
         */
        case IBM970:
        case IBM970FX:
        case IBM970MP:
        case IBM970GX:
                scratch = mfspr(SPR_HID5);
                scratch &= ~HID5_970_DCBZ_SIZE_HI;
                mtspr(SPR_HID5, scratch);
                break;
        #ifdef __powerpc64__
        case IBMPOWER7:
                /* XXX: get from ibm,slb-size in device tree */
                n_slbs = 32;
                break;
        #endif
        }

        /*
         * Initialize the interrupt tables and figure out our cache line
         * size and whether or not we need the 64-bit bridge code.
         */

        /*
         * Disable translation in case the vector area hasn't been
         * mapped (G5). Note that no OFW calls can be made until
         * translation is re-enabled.
         */

        msr = mfmsr();
        mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

        /*
         * Measure the cacheline size using dcbz
         *
         * Use EXC_PGM as a playground. We are about to overwrite it
         * anyway, we know it exists, and we know it is cache-aligned.
         */

        cache_check = (void *)EXC_PGM;

        for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
                cache_check[cacheline_size] = 0xff;

        __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

        /* Find the first byte dcbz did not zero to get the cache line size */
        for (cacheline_size = 0; cacheline_size < 0x100 &&
            cache_check[cacheline_size] == 0; cacheline_size++);

        /* Work around psim bug */
        if (cacheline_size == 0) {
                cacheline_warn = 1;
                cacheline_size = 32;
        }

        /* Make sure the kernel icache is valid before we go too much further */
        __syncicache((caddr_t)startkernel, endkernel - startkernel);

        #ifndef __powerpc64__
        /*
         * Figure out whether we need to use the 64 bit PMAP. This works by
         * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
         * and setting ppc64 = 0 if that causes a trap.
         */

        ppc64 = 1;

        bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
        __syncicache((void *)EXC_PGM, (size_t)&testppc64size);

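        /*
         * The sequence below seeds SPRG2 with our nonzero guess and then
         * executes mtmsrd. On a 32-bit CPU that traps into the testppc64
         * stub just installed at EXC_PGM, which is expected to clear SPRG2
         * and skip the instruction, so reading SPRG2 back yields 0.
         */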
        __asm __volatile("\
                mfmsr %0;       \
                mtsprg2 %1;     \
                                \
                mtmsrd %0;      \
                mfsprg2 %1;"
            : "=r"(scratch), "=r"(ppc64));

        if (ppc64)
                cpu_features |= PPC_FEATURE_64;

        /*
         * Now copy restorebridge into all the handlers, if necessary,
         * and set up the trap tables.
         */

        if (cpu_features & PPC_FEATURE_64) {
                /* Patch the two instances of rfi -> rfid */
                bcopy(&rfid_patch, &rfi_patch1, 4);
        #ifdef KDB
                /* rfi_patch2 is at the end of dbleave */
                bcopy(&rfid_patch, &rfi_patch2, 4);
        #endif

                /*
                 * Copy a code snippet to restore 32-bit bridge mode
                 * to the top of every non-generic trap handler
                 */

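                /*
                 * trap_offset reserves room for this prologue; the real
                 * handler bodies are installed at EXC_x + trap_offset below.
                 */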
                trap_offset += (size_t)&restorebridgesize;
                bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
                bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
                bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
                bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
                bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);

                /*
                 * Set the common trap entry point to the one that
                 * knows to restore 32-bit operation on execution.
                 */

                generictrap = &trapcode64;
        } else {
                generictrap = &trapcode;
        }

        #else /* powerpc64 */
        cpu_features |= PPC_FEATURE_64;
        generictrap = &trapcode;

        /* Set TOC base so that the interrupt code can get at it */
        *((register_t *)TRAP_TOCBASE) = toc;
        #endif

        bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);

#ifdef KDB
        bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
        bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
        bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
        bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
#else
        bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
#endif
        bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
        bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
        bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
        #ifdef __powerpc64__
        bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapsize);
        bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapsize);
        #endif
        bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_PERF, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
        bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
        #ifndef __powerpc64__
        /* G2-specific TLB miss helper handlers */
        bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
        bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
        bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
        #endif
        __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

        /*
         * Restore MSR
         */
        mtmsr(msr);

        /* Warn if cacheline size was not determined */
        if (cacheline_warn == 1) {
                printf("WARNING: cacheline size undetermined, setting to 32\n");
        }

        /*
         * Choose a platform module so we can get the physical memory map.
         */

        platform_probe_and_attach();

        /*
         * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
         * in case the platform module had a better idea of what we
         * should do.
         */
        if (cpu_features & PPC_FEATURE_64)
                pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
        else
                pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

        pmap_bootstrap(startkernel, endkernel);
        mtmsr(PSL_KERNSET & ~PSL_EE);

        /*
         * Initialize params/tunables that are derived from memsize
         */
        init_param2(physmem);

        /*
         * Grab booted kernel's name
         */
        env = kern_getenv("kernelname");
        if (env != NULL) {
                strlcpy(kernelname, env, sizeof(kernelname));
                freeenv(env);
        }

        /*
         * Finish setting up thread0.
         */
        thread0.td_pcb = (struct pcb *)
            ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
            sizeof(struct pcb)) & ~15UL);
        bzero((void *)thread0.td_pcb, sizeof(struct pcb));
        pc->pc_curpcb = thread0.td_pcb;

        /* Initialise the message buffer. */
        msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
        if (boothowto & RB_KDB)
                kdb_enter(KDB_WHY_BOOTFLAGS,
                    "Boot flags requested debugger");
#endif

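        /*
         * Return the address just below thread0's pcb, 16-byte aligned;
         * the startup assembly uses it as the initial kernel stack pointer.
         */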
        return (((uintptr_t)thread0.td_pcb -
            (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

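/*
 * bzero: align to a word boundary, clear eight words per iteration while
 * enough bytes remain, then mop up word-by-word and byte-by-byte.
 */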
void
bzero(void *buf, size_t len)
{
        caddr_t p;

        p = buf;

        while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
                *p++ = 0;
                len--;
        }

        while (len >= sizeof(u_long) * 8) {
                *(u_long*) p = 0;
                *((u_long*) p + 1) = 0;
                *((u_long*) p + 2) = 0;
                *((u_long*) p + 3) = 0;
                len -= sizeof(u_long) * 8;
                *((u_long*) p + 4) = 0;
                *((u_long*) p + 5) = 0;
                *((u_long*) p + 6) = 0;
                *((u_long*) p + 7) = 0;
                p += sizeof(u_long) * 8;
        }

        while (len >= sizeof(u_long)) {
                *(u_long*) p = 0;
                len -= sizeof(u_long);
                p += sizeof(u_long);
        }

        while (len) {
                *p++ = 0;
                len--;
        }
}

void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
        /* TBD */
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

        OF_exit();
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr0 = (register_t)addr;

        return (0);
}

int
ptrace_single_step(struct thread *td)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr1 |= PSL_SE;

        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr1 &= ~PSL_SE;

        return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

        kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

        kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
        /* Copy the SLB contents from the current CPU */
        memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

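/*
 * Spinlocks disable interrupts via the MSR. The saved MSR and a nesting
 * count live in the thread, so only the outermost spinlock_exit() restores
 * the interrupt state.
 */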
void
spinlock_enter(void)
{
        struct thread *td;
        register_t msr;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                msr = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_msr = msr;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t msr;

        td = curthread;
        critical_exit();
        msr = td->td_md.md_saved_msr;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(msr);
}

int db_trap_glue(struct trapframe *);   /* Called from trap_subr.S */

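/*
 * Decide whether a kernel-mode trap belongs to the debugger. In SRR1 of a
 * program exception, bit 0x20000 indicates a trap instruction, i.e. a
 * breakpoint rather than an illegal instruction.
 */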
int
db_trap_glue(struct trapframe *frame)
{
        if (!(frame->srr1 & PSL_PR)
            && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
                || (frame->exc == EXC_PGM
                    && (frame->srr1 & 0x20000))
                || frame->exc == EXC_BPT
                || frame->exc == EXC_DSI)) {
                int type = frame->exc;

                /* Ignore DTrace traps. */
                if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
                        return (0);
                if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
                        type = T_BREAKPOINT;
                }
                return (kdb_trap(type, 0, frame));
        }

        return (0);
}

#ifndef __powerpc64__

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{

        return (pa);
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
        register_t msr;
        register_t msscr0;
        register_t cache_reg;
        volatile uint32_t *memp;
        uint32_t temp;
        int i;
        int x;

        msr = mfmsr();
        powerpc_sync();
        mtmsr(msr & ~(PSL_EE | PSL_DR));
        msscr0 = mfspr(SPR_MSSCR0);
        msscr0 &= ~MSSCR0_L2PFE;
        mtspr(SPR_MSSCR0, msscr0);
        powerpc_sync();
        isync();
        __asm__ __volatile__("dssall; sync");
        powerpc_sync();
        isync();
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));

        /* Lock the L1 Data cache. */
        mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
        powerpc_sync();
        isync();

        mtspr(SPR_LDSTCR, 0);

        /*
         * Perform this in two stages: Flush the cache starting in RAM,
         * then do it from ROM.
         */
        memp = (volatile uint32_t *)0x00000000;
        for (i = 0; i < 128 * 1024; i++) {
                temp = *memp;
                __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                memp += 32/sizeof(*memp);
        }

        memp = (volatile uint32_t *)0xfff00000;
        x = 0xfe;

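        /*
         * Walk a single unlocked way across the L1 via LDSTCR: each pass
         * leaves exactly one way-lock bit clear (0xfe, 0xfd, 0xfb, ...,
         * 0x7f), so the ROM reads below displace, and thereby flush, one
         * cache way at a time.
         */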
        for (; x != 0xff;) {
                mtspr(SPR_LDSTCR, x);
                for (i = 0; i < 128; i++) {
                        temp = *memp;
                        __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                        memp += 32/sizeof(*memp);
                }
                x = ((x << 1) | 1) & 0xff;
        }
        mtspr(SPR_LDSTCR, 0);

        cache_reg = mfspr(SPR_L2CR);
        if (cache_reg & L2CR_L2E) {
                cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
                mtspr(SPR_L2CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
                while (mfspr(SPR_L2CR) & L2CR_L2HWF)
                        ; /* Busy wait for cache to flush */
                powerpc_sync();
                cache_reg &= ~L2CR_L2E;
                mtspr(SPR_L2CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
                powerpc_sync();
                while (mfspr(SPR_L2CR) & L2CR_L2I)
                        ; /* Busy wait for L2 cache invalidate */
                powerpc_sync();
        }

        cache_reg = mfspr(SPR_L3CR);
        if (cache_reg & L3CR_L3E) {
                cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
                mtspr(SPR_L3CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
                while (mfspr(SPR_L3CR) & L3CR_L3HWF)
                        ; /* Busy wait for cache to flush */
                powerpc_sync();
                cache_reg &= ~L3CR_L3E;
                mtspr(SPR_L3CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
                powerpc_sync();
                while (mfspr(SPR_L3CR) & L3CR_L3I)
                        ; /* Busy wait for L3 cache invalidate */
                powerpc_sync();
        }

        mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
        powerpc_sync();
        isync();

        mtmsr(msr);
}

void
cpu_sleep()
{
        static u_quad_t timebase = 0;
        static register_t sprgs[4];
        static register_t srrs[2];

        jmp_buf resetjb;
        struct thread *fputd;
        struct thread *vectd;
        register_t hid0;
        register_t msr;
        register_t saved_msr;

        ap_pcpu = pcpup;

        PCPU_SET(restore, &resetjb);

        saved_msr = mfmsr();
        fputd = PCPU_GET(fputhread);
        vectd = PCPU_GET(vecthread);
        if (fputd != NULL)
                save_fpu(fputd);
        if (vectd != NULL)
                save_vec(vectd);
        if (setjmp(resetjb) == 0) {
                sprgs[0] = mfspr(SPR_SPRG0);
                sprgs[1] = mfspr(SPR_SPRG1);
                sprgs[2] = mfspr(SPR_SPRG2);
                sprgs[3] = mfspr(SPR_SPRG3);
                srrs[0] = mfspr(SPR_SRR0);
                srrs[1] = mfspr(SPR_SRR1);
                timebase = mftb();
                powerpc_sync();
                flush_disable_caches();
                hid0 = mfspr(SPR_HID0);
                hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
                powerpc_sync();
                isync();
                msr = mfmsr() | PSL_POW;
                mtspr(SPR_HID0, hid0);
                powerpc_sync();

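                /*
                 * With HID0_SLEEP selected, setting PSL_POW stops the core.
                 * Wakeup re-enters through the reset vector, which longjmps
                 * back to resetjb via the pcpu restore pointer, so this
                 * loop never exits normally.
                 */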
                while (1)
                        mtmsr(msr);
        }
        mttb(timebase);
        PCPU_SET(curthread, curthread);
        PCPU_SET(curpcb, curthread->td_pcb);
        pmap_activate(curthread);
        powerpc_sync();
        mtspr(SPR_SPRG0, sprgs[0]);
        mtspr(SPR_SPRG1, sprgs[1]);
        mtspr(SPR_SPRG2, sprgs[2]);
        mtspr(SPR_SPRG3, sprgs[3]);
        mtspr(SPR_SRR0, srrs[0]);
        mtspr(SPR_SRR1, srrs[1]);
        mtmsr(saved_msr);
        if (fputd == curthread)
                enable_fpu(curthread);
        if (vectd == curthread)
                enable_vec(curthread);
        powerpc_sync();
}