1/*-
2 * Initial implementation:
3 * Copyright (c) 2001 Robert Drehmel
4 * All rights reserved.
5 *
6 * As long as the above copyright statement and this notice remain
7 * unchanged, you can do what ever you want with this file.
8 */
9/*-
10 * Copyright (c) 2008 Marius Strobl <marius@FreeBSD.org>
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/boot/sparc64/loader/main.c 223719 2011-07-02 11:14:54Z marius $");
37
38/*
39 * FreeBSD/sparc64 kernel loader - machine dependent part
40 *
41 * - implements copyin and readin functions that map kernel
42 * pages on demand. The machine independent code does not
43 * know the size of the kernel early enough to pre-enter
44 * TTEs, and installing just one 4MB mapping seemed too
45 * limiting to me.
46 */
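/*
 * Rough flow (editor's sketch based on the code below): the machine
 * independent loader calls archsw.arch_readin/arch_copyin, which are
 * wired to sparc64_readin()/sparc64_copyin(); those in turn call
 * mmu_ops->mmu_mapin() (mmu_mapin_sun4u() on sun4u) to enter locked
 * 4MB dTLB/iTLB mappings covering the target range before the data
 * is read or copied in.
 */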
47
48#include <stand.h>
49#include <sys/param.h>
50#include <sys/exec.h>
51#include <sys/linker.h>
52#include <sys/queue.h>
53#include <sys/types.h>
54
55#include <vm/vm.h>
56#include <machine/asi.h>
57#include <machine/cmt.h>
58#include <machine/cpufunc.h>
59#include <machine/elf.h>
60#include <machine/fireplane.h>
61#include <machine/jbus.h>
62#include <machine/lsu.h>
63#include <machine/metadata.h>
64#include <machine/tte.h>
65#include <machine/tlb.h>
66#include <machine/upa.h>
67#include <machine/ver.h>
68#include <machine/vmparam.h>
69
70#include "bootstrap.h"
71#include "libofw.h"
72#include "dev_net.h"
73
74#ifndef CTASSERT
75#define CTASSERT(x) _CTASSERT(x, __LINE__)
76#define _CTASSERT(x, y) __CTASSERT(x, y)
77#define __CTASSERT(x, y) typedef char __assert ## y[(x) ? 1 : -1]
78#endif
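/*
 * CTASSERT() is a compile-time assertion: the typedef declares a char
 * array of size 1 when the condition holds and of size -1 (a compile
 * error) when it does not.  For example, CTASSERT(HEAPSZ % PAGE_SIZE == 0)
 * below fails to compile if HEAPSZ is not a multiple of the page size.
 */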
79
80extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
81
82enum {
83 HEAPVA = 0x800000,
84 HEAPSZ = 0x1000000,
85 LOADSZ = 0x1000000 /* for kernel and modules */
86};
87
88/* At least the Sun Fire V1280 requires page-sized allocations to be claimed. */
89CTASSERT(HEAPSZ % PAGE_SIZE == 0);
90
91static struct mmu_ops {
92 void (*tlb_init)(void);
93 int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
94} *mmu_ops;
95
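/*
 * Calling convention used when jumping to the loaded kernel: the
 * metadata pointer is passed in the first argument and the Open
 * Firmware entry vector in the last; the middle arguments are passed
 * as zero (see the call in __elfN(exec) below).
 */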
96typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
97 void *openfirmware);
98
99static inline u_long dtlb_get_data_sun4u(u_int, u_int);
100static int dtlb_enter_sun4u(u_int, u_long data, vm_offset_t);
101static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
102static inline u_long itlb_get_data_sun4u(u_int, u_int);
103static int itlb_enter_sun4u(u_int, u_long data, vm_offset_t);
104static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
105static void itlb_relocate_locked0_sun4u(void);
106extern vm_offset_t md_load(char *, vm_offset_t *);
107static int sparc64_autoload(void);
108static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
109static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
110static vm_offset_t claim_virt(vm_offset_t, size_t, int);
111static vm_offset_t alloc_phys(size_t, int);
112static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
113static void release_phys(vm_offset_t, u_int);
114static int __elfN(exec)(struct preloaded_file *);
115static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
116static vm_offset_t init_heap(void);
117static phandle_t find_bsp_sun4u(phandle_t, uint32_t);
118const char *cpu_cpuid_prop_sun4u(void);
119uint32_t cpu_get_mid_sun4u(void);
120static void tlb_init_sun4u(void);
121
122#ifdef LOADER_DEBUG
123typedef u_int64_t tte_t;
124
125static void pmap_print_tlb_sun4u(void);
126static void pmap_print_tte_sun4u(tte_t, tte_t);
127#endif
128
129static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };
130
131/* sun4u */
132struct tlb_entry *dtlb_store;
133struct tlb_entry *itlb_store;
134u_int dtlb_slot;
135u_int itlb_slot;
136static int cpu_impl;
137static u_int dtlb_slot_max;
138static u_int itlb_slot_max;
139static u_int tlb_locked;
140
141static vm_offset_t curkva = 0;
142static vm_offset_t heapva;
143
144static phandle_t root;
145
146/*
147 * Machine dependent structures that the machine independent
148 * loader part uses.
149 */
150struct devsw *devsw[] = {
151#ifdef LOADER_DISK_SUPPORT
152 &ofwdisk,
153#endif
154#ifdef LOADER_NET_SUPPORT
155 &netdev,
156#endif
157 0
158};
159struct arch_switch archsw;
160
161static struct file_format sparc64_elf = {
162 __elfN(loadfile),
163 __elfN(exec)
164};
165struct file_format *file_formats[] = {
166 &sparc64_elf,
167 0
168};
169struct fs_ops *file_system[] = {
170#ifdef LOADER_UFS_SUPPORT
171 &ufs_fsops,
172#endif
173#ifdef LOADER_CD9660_SUPPORT
174 &cd9660_fsops,
175#endif
176#ifdef LOADER_ZIP_SUPPORT
177 &zipfs_fsops,
178#endif
179#ifdef LOADER_GZIP_SUPPORT
180 &gzipfs_fsops,
181#endif
182#ifdef LOADER_BZIP2_SUPPORT
183 &bzipfs_fsops,
184#endif
185#ifdef LOADER_NFS_SUPPORT
186 &nfs_fsops,
187#endif
188#ifdef LOADER_TFTP_SUPPORT
189 &tftp_fsops,
190#endif
191 0
192};
193struct netif_driver *netif_drivers[] = {
194#ifdef LOADER_NET_SUPPORT
195 &ofwnet,
196#endif
197 0
198};
199
200extern struct console ofwconsole;
201struct console *consoles[] = {
202 &ofwconsole,
203 0
204};
205
206#ifdef LOADER_DEBUG
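/*
 * Debug-only helpers that program data watchpoints via the LSU control
 * register: the *_set_mask() variants write the physical or virtual
 * watchpoint address register and the byte mask, while the *_set()
 * variants derive the mask from an address/size pair.
 */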
207static int
208watch_phys_set_mask(vm_offset_t pa, u_long mask)
209{
210 u_long lsucr;
211
212 stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
213 lsucr = ldxa(0, ASI_LSU_CTL_REG);
214 lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
215 (mask << LSU_PM_SHIFT);
216 stxa(0, ASI_LSU_CTL_REG, lsucr);
217 return (0);
218}
219
220static int
221watch_phys_set(vm_offset_t pa, int sz)
222{
223 u_long off;
224
225 off = (u_long)pa & 7;
226 /* Test for misaligned watch points. */
227 if (off + sz > 8)
228 return (-1);
229 return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
230}
231
232
233static int
234watch_virt_set_mask(vm_offset_t va, u_long mask)
235{
236 u_long lsucr;
237
238 stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
239 lsucr = ldxa(0, ASI_LSU_CTL_REG);
240 lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
241 (mask << LSU_VM_SHIFT);
242 stxa(0, ASI_LSU_CTL_REG, lsucr);
243 return (0);
244}
245
246static int
247watch_virt_set(vm_offset_t va, int sz)
248{
249 u_long off;
250
251 off = (u_long)va & 7;
252 /* Test for misaligned watch points. */
253 if (off + sz > 8)
254 return (-1);
255 return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
256}
257#endif
258
259/*
260 * archsw functions
261 */
262static int
263sparc64_autoload(void)
264{
265
266 return (0);
267}
268
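/*
 * readin/copyin first make sure the destination range is mapped (via
 * mmu_ops->mmu_mapin()) and only then access it directly.
 */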
269static ssize_t
270sparc64_readin(const int fd, vm_offset_t va, const size_t len)
271{
272
273 mmu_ops->mmu_mapin(va, len);
274 return (read(fd, (void *)va, len));
275}
276
277static ssize_t
278sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
279{
280
281 mmu_ops->mmu_mapin(dest, len);
282 memcpy((void *)dest, src, len);
283 return (len);
284}
285
286/*
287 * other MD functions
288 */
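/*
 * Thin wrappers around the "claim", "map" and "release" methods of the
 * firmware's mmu and memory nodes; 64-bit physical addresses are split
 * into hi/lo cells for the firmware interface.
 */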
289static vm_offset_t
290claim_virt(vm_offset_t virt, size_t size, int align)
291{
292 vm_offset_t mva;
293
294 if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
295 return ((vm_offset_t)-1);
296 return (mva);
297}
298
299static vm_offset_t
300alloc_phys(size_t size, int align)
301{
302 cell_t phys_hi, phys_low;
303
304 if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
305 &phys_hi) == -1)
306 return ((vm_offset_t)-1);
307 return ((vm_offset_t)phys_hi << 32 | phys_low);
308}
309
310static int
311map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
312{
313
314 return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
315 (uint32_t)(phys >> 32), virt, size, mode));
316}
317
318static void
319release_phys(vm_offset_t phys, u_int size)
320{
321
322 (void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
323 (uint32_t)(phys >> 32), size);
324}
325
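/*
 * Hand control to the loaded kernel: gather the module metadata via
 * md_load(), release the loader heap and call the kernel's ELF entry
 * point with the metadata pointer and the Open Firmware entry vector.
 */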
326static int
327__elfN(exec)(struct preloaded_file *fp)
328{
329 struct file_metadata *fmp;
330 vm_offset_t mdp;
331 Elf_Addr entry;
332 Elf_Ehdr *e;
333 int error;
334
335 if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
336 return (EFTYPE);
337 e = (Elf_Ehdr *)&fmp->md_data;
338
339 if ((error = md_load(fp->f_args, &mdp)) != 0)
340 return (error);
341
342 printf("jumping to kernel entry at %#lx.\n", e->e_entry);
343#ifdef LOADER_DEBUG
344 pmap_print_tlb_sun4u();
345#endif
346
347 dev_cleanup();
348
349 entry = e->e_entry;
350
351 OF_release((void *)heapva, HEAPSZ);
352
353 ((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);
354
355 panic("%s: exec returned", __func__);
356}
357
358static inline u_long
359dtlb_get_data_sun4u(u_int tlb, u_int slot)
360{
361 u_long data, pstate;
362
363 slot = TLB_DAR_SLOT(tlb, slot);
364 /*
365	 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
366	 * work around errata of USIII and beyond.
367 */
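	/*
	 * Interrupts are disabled around the two loads, presumably so
	 * that no trap handler can slip in between them; the workaround
	 * relies on the accesses being issued back-to-back (editor's note).
	 */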
368 pstate = rdpr(pstate);
369 wrpr(pstate, pstate & ~PSTATE_IE, 0);
370 (void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
371 data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
372 wrpr(pstate, pstate, 0);
373 return (data);
374}
375
376static inline u_long
377itlb_get_data_sun4u(u_int tlb, u_int slot)
378{
379 u_long data, pstate;
380
381 slot = TLB_DAR_SLOT(tlb, slot);
382 /*
383	 * We read ASI_ITLB_DATA_ACCESS_REG twice back-to-back in order to
384	 * work around errata of USIII and beyond.
385 */
386 pstate = rdpr(pstate);
387 wrpr(pstate, pstate & ~PSTATE_IE, 0);
388 (void)ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
389 data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
390 wrpr(pstate, pstate, 0);
391 return (data);
392}
393
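/*
 * Look up a virtual address in the locked dTLB/iTLB; these return the
 * physical address of the mapping or -1 if no valid entry matches.
 */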
394static vm_offset_t
395dtlb_va_to_pa_sun4u(vm_offset_t va)
396{
397 u_long pstate, reg;
398	 u_int i, tlb;
399
400 pstate = rdpr(pstate);
401 wrpr(pstate, pstate & ~PSTATE_IE, 0);
402 for (i = 0; i < dtlb_slot_max; i++) {
403 reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
404 ASI_DTLB_TAG_READ_REG);
405 if (TLB_TAR_VA(reg) != va)
406 continue;
407 reg = dtlb_get_data_sun4u(tlb_locked, i);
408 wrpr(pstate, pstate, 0);
409 reg >>= TD_PA_SHIFT;
410 if (cpu_impl == CPU_IMPL_SPARC64V ||
411 cpu_impl >= CPU_IMPL_ULTRASPARCIII)
412 return (reg & TD_PA_CH_MASK);
413 return (reg & TD_PA_SF_MASK);
414 }
415 wrpr(pstate, pstate, 0);
416 return (-1);
417}
418
419static vm_offset_t
420itlb_va_to_pa_sun4u(vm_offset_t va)
421{
422 u_long pstate, reg;
423 int i;
424
425 pstate = rdpr(pstate);
426 wrpr(pstate, pstate & ~PSTATE_IE, 0);
427 for (i = 0; i < itlb_slot_max; i++) {
428 reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
429 ASI_ITLB_TAG_READ_REG);
430 if (TLB_TAR_VA(reg) != va)
431 continue;
432 reg = itlb_get_data_sun4u(tlb_locked, i);
433 wrpr(pstate, pstate, 0);
434 reg >>= TD_PA_SHIFT;
435 if (cpu_impl == CPU_IMPL_SPARC64V ||
436 cpu_impl >= CPU_IMPL_ULTRASPARCIII)
437 return (reg & TD_PA_CH_MASK);
438 return (reg & TD_PA_SF_MASK);
439 }
440 wrpr(pstate, pstate, 0);
441 return (-1);
442}
443
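/*
 * Enter locked TLB entries through the firmware's SUNW,dtlb-load and
 * SUNW,itlb-load methods.  On USIII+ a locked iTLB entry must not go
 * into slot 0 (see itlb_relocate_locked0_sun4u() below).
 */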
444static int
445dtlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
446{
447
448 return (OF_call_method("SUNW,dtlb-load", mmu, 3, 0, index, data,
449 virt));
450}
451
452static int
453itlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
454{
455
456 if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp && index == 0 &&
457 (data & TD_L) != 0)
458 panic("%s: won't enter locked TLB entry at index 0 on USIII+",
459 __func__);
460 return (OF_call_method("SUNW,itlb-load", mmu, 3, 0, index, data,
461 virt));
462}
463
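/*
 * If the firmware left a locked iTLB entry in slot 0 of a USIII+ CPU,
 * move it to another free slot; locked entries must not reside in slot
 * 0 on these CPUs (Cheetah+ erratum #34, see tlb_init_sun4u()).
 */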
464static void
465itlb_relocate_locked0_sun4u(void)
466{
467 u_long data, pstate, tag;
468 int i;
469
470 if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
471 return;
472
473 pstate = rdpr(pstate);
474 wrpr(pstate, pstate & ~PSTATE_IE, 0);
475
476 data = itlb_get_data_sun4u(tlb_locked, 0);
477 if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
478 wrpr(pstate, pstate, 0);
479 return;
480 }
481
482 /* Flush the mapping of slot 0. */
483 tag = ldxa(TLB_DAR_SLOT(tlb_locked, 0), ASI_ITLB_TAG_READ_REG);
484 stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
485 TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
486 flush(0); /* The USIII-family ignores the address. */
487
488 /*
489 * Search a replacement slot != 0 and enter the data and tag
490 * that formerly were in slot 0.
491 */
492 for (i = 1; i < itlb_slot_max; i++) {
493 if ((itlb_get_data_sun4u(tlb_locked, i) & TD_V) != 0)
494 continue;
495
496 stxa(AA_IMMU_TAR, ASI_IMMU, tag);
497 stxa(TLB_DAR_SLOT(tlb_locked, i), ASI_ITLB_DATA_ACCESS_REG,
498 data);
499 flush(0); /* The USIII-family ignores the address. */
500 break;
501 }
502 wrpr(pstate, pstate, 0);
503 if (i == itlb_slot_max)
504 panic("%s: could not find a replacement slot", __func__);
505}
506
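/*
 * Make sure [va, va + len) is backed by locked 4MB mappings: for each
 * 4MB page not yet present in the TLBs, allocate physical memory from
 * the firmware, claim the virtual range and enter identical dTLB and
 * iTLB entries (filling the locked slots from the top down), recording
 * them in dtlb_store/itlb_store.
 */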
507static int
508mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
509{
510 vm_offset_t pa, mva;
511 u_long data;
512 u_int index;
513
514 if (va + len > curkva)
515 curkva = va + len;
516
517 pa = (vm_offset_t)-1;
518 len += va & PAGE_MASK_4M;
519 va &= ~PAGE_MASK_4M;
520 while (len) {
521 if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
522 itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
523 /* Allocate a physical page, claim the virtual area. */
524 if (pa == (vm_offset_t)-1) {
525 pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
526 if (pa == (vm_offset_t)-1)
527 panic("%s: out of memory", __func__);
528 mva = claim_virt(va, PAGE_SIZE_4M, 0);
529 if (mva != va)
530 panic("%s: can't claim virtual page "
531 "(wanted %#lx, got %#lx)",
532 __func__, va, mva);
533 /*
534 * The mappings may have changed, be paranoid.
535 */
536 continue;
537 }
538 /*
539			 * In practice we can allocate at most two pages fewer
540			 * than this (depending on the kernel TSB size).
541 */
542 if (dtlb_slot >= dtlb_slot_max)
543 panic("%s: out of dtlb_slots", __func__);
544 if (itlb_slot >= itlb_slot_max)
545 panic("%s: out of itlb_slots", __func__);
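			/*
			 * TTE bits (editor's note): valid, 4MB page size,
			 * locked, cacheable in both the physical and the
			 * virtual caches, privileged and writable.
			 */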
546 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
547 TD_CV | TD_P | TD_W;
548 dtlb_store[dtlb_slot].te_pa = pa;
549 dtlb_store[dtlb_slot].te_va = va;
550 index = dtlb_slot_max - dtlb_slot - 1;
551 if (dtlb_enter_sun4u(index, data, va) < 0)
552 panic("%s: can't enter dTLB slot %d data "
553 "%#lx va %#lx", __func__, index, data,
554 va);
555 dtlb_slot++;
556 itlb_store[itlb_slot].te_pa = pa;
557 itlb_store[itlb_slot].te_va = va;
558 index = itlb_slot_max - itlb_slot - 1;
559 if (itlb_enter_sun4u(index, data, va) < 0)
560 panic("%s: can't enter iTLB slot %d data "
561				    "%#lx va %#lx", __func__, index, data,
562 va);
563 itlb_slot++;
564 pa = (vm_offset_t)-1;
565 }
566 len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
567 va += PAGE_SIZE_4M;
568 }
569 if (pa != (vm_offset_t)-1)
570 release_phys(pa, PAGE_SIZE_4M);
571 return (0);
572}
573
574static vm_offset_t
575init_heap(void)
576{
577
578	 /* There is no need for contiguous physical heap memory. */
579 heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
580 return (heapva);
581}
582
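/*
 * Recursively search the device tree for the "cpu" node whose module
 * ID property (see cpu_cpuid_prop_sun4u()) matches the boot
 * processor's ID.
 */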
583static phandle_t
584find_bsp_sun4u(phandle_t node, uint32_t bspid)
585{
586 char type[sizeof("cpu")];
587 phandle_t child;
588 uint32_t cpuid;
589
590 for (; node > 0; node = OF_peer(node)) {
591 child = OF_child(node);
592 if (child > 0) {
593 child = find_bsp_sun4u(child, bspid);
594 if (child > 0)
595 return (child);
596 } else {
597 if (OF_getprop(node, "device_type", type,
598 sizeof(type)) <= 0)
599 continue;
600 if (strcmp(type, "cpu") != 0)
601 continue;
602 if (OF_getprop(node, cpu_cpuid_prop_sun4u(), &cpuid,
603 sizeof(cpuid)) <= 0)
604 continue;
605 if (cpuid == bspid)
606 return (node);
607 }
608 }
609 return (0);
610}
611
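/*
 * The name of the property holding the CPU's module/agent ID and the
 * register it is read from depend on the interconnect: UPA for the
 * older CPUs, Fireplane or JBus for the USIII family and a dedicated
 * interrupt ID register on USIV.
 */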
612const char *
613cpu_cpuid_prop_sun4u(void)
614{
615
616 switch (cpu_impl) {
617 case CPU_IMPL_SPARC64:
618 case CPU_IMPL_SPARC64V:
619 case CPU_IMPL_ULTRASPARCI:
620 case CPU_IMPL_ULTRASPARCII:
621 case CPU_IMPL_ULTRASPARCIIi:
622 case CPU_IMPL_ULTRASPARCIIe:
623 return ("upa-portid");
624 case CPU_IMPL_ULTRASPARCIII:
625 case CPU_IMPL_ULTRASPARCIIIp:
626 case CPU_IMPL_ULTRASPARCIIIi:
627 case CPU_IMPL_ULTRASPARCIIIip:
628 return ("portid");
629 case CPU_IMPL_ULTRASPARCIV:
630 case CPU_IMPL_ULTRASPARCIVp:
631 return ("cpuid");
632 default:
633 return ("");
634 }
635}
636
637uint32_t
638cpu_get_mid_sun4u(void)
639{
640
641 switch (cpu_impl) {
642 case CPU_IMPL_SPARC64:
643 case CPU_IMPL_SPARC64V:
644 case CPU_IMPL_ULTRASPARCI:
645 case CPU_IMPL_ULTRASPARCII:
646 case CPU_IMPL_ULTRASPARCIIi:
647 case CPU_IMPL_ULTRASPARCIIe:
648 return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
649 case CPU_IMPL_ULTRASPARCIII:
650 case CPU_IMPL_ULTRASPARCIIIp:
651 return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
652 ASI_FIREPLANE_CONFIG_REG)));
653 case CPU_IMPL_ULTRASPARCIIIi:
654 case CPU_IMPL_ULTRASPARCIIIip:
655 return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
656 case CPU_IMPL_ULTRASPARCIV:
657 case CPU_IMPL_ULTRASPARCIVp:
658 return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
659 default:
660 return (0);
661 }
662}
663
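/*
 * Determine the CPU implementation and which TLB holds the locked
 * entries, look up the boot processor's node to obtain the number of
 * dTLB/iTLB slots, apply the USIII+ slot 0 workaround and allocate the
 * arrays used to record the locked mappings.
 */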
664static void
665tlb_init_sun4u(void)
666{
667 phandle_t bsp;
668
669 cpu_impl = VER_IMPL(rdpr(ver));
670 switch (cpu_impl) {
671 case CPU_IMPL_SPARC64:
672 case CPU_IMPL_ULTRASPARCI:
673 case CPU_IMPL_ULTRASPARCII:
674 case CPU_IMPL_ULTRASPARCIIi:
675 case CPU_IMPL_ULTRASPARCIIe:
676 tlb_locked = TLB_DAR_T32;
677 break;
678 case CPU_IMPL_ULTRASPARCIII:
679 case CPU_IMPL_ULTRASPARCIIIp:
680 case CPU_IMPL_ULTRASPARCIIIi:
681 case CPU_IMPL_ULTRASPARCIIIip:
682 case CPU_IMPL_ULTRASPARCIV:
683 case CPU_IMPL_ULTRASPARCIVp:
684 tlb_locked = TLB_DAR_T16;
685 break;
686 case CPU_IMPL_SPARC64V:
687 tlb_locked = TLB_DAR_FTLB;
688 break;
689 }
690 bsp = find_bsp_sun4u(OF_child(root), cpu_get_mid_sun4u());
691 if (bsp == 0)
692 panic("%s: no node for bootcpu?!?!", __func__);
693
694 if (OF_getprop(bsp, "#dtlb-entries", &dtlb_slot_max,
695 sizeof(dtlb_slot_max)) == -1 ||
696 OF_getprop(bsp, "#itlb-entries", &itlb_slot_max,
697 sizeof(itlb_slot_max)) == -1)
698 panic("%s: can't get TLB slot max.", __func__);
699
700 if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
701#ifdef LOADER_DEBUG
702 printf("pre fixup:\n");
703 pmap_print_tlb_sun4u();
704#endif
705
706 /*
707 * Relocate the locked entry in it16 slot 0 (if existent)
708 * as part of working around Cheetah+ erratum 34.
709 */
710 itlb_relocate_locked0_sun4u();
711
712#ifdef LOADER_DEBUG
713 printf("post fixup:\n");
714 pmap_print_tlb_sun4u();
715#endif
716 }
717
718 dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
719 itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
720 if (dtlb_store == NULL || itlb_store == NULL)
721 panic("%s: can't allocate TLB store", __func__);
722}
723
724int
725main(int (*openfirm)(void *))
726{
727 char bootpath[64];
728 char compatible[32];
729 struct devsw **dp;
730
731 /*
732	 * Tell the Open Firmware functions where to find the OFW gate.
733 */
734 OF_init(openfirm);
735
736 archsw.arch_getdev = ofw_getdev;
737 archsw.arch_copyin = sparc64_copyin;
738 archsw.arch_copyout = ofw_copyout;
739 archsw.arch_readin = sparc64_readin;
740 archsw.arch_autoload = sparc64_autoload;
741
742 if (init_heap() == (vm_offset_t)-1)
743 OF_exit();
744 setheap((void *)heapva, (void *)(heapva + HEAPSZ));
745
746 /*
747 * Probe for a console.
748 */
749 cons_probe();
750
751 if ((root = OF_peer(0)) == -1)
752 panic("%s: can't get root phandle", __func__);
753 OF_getprop(root, "compatible", compatible, sizeof(compatible));
754 mmu_ops = &mmu_ops_sun4u;
755
756 mmu_ops->tlb_init();
757
758 /*
759 * Initialize devices.
760 */
761 for (dp = devsw; *dp != 0; dp++) {
762 if ((*dp)->dv_init != 0)
763 (*dp)->dv_init();
764 }
765
766 /*
767 * Set up the current device.
768 */
769 OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));
770
771 /*
772 * Sun compatible bootable CD-ROMs have a disk label placed
773 * before the cd9660 data, with the actual filesystem being
774 * in the first partition, while the other partitions contain
775 * pseudo disk labels with embedded boot blocks for different
776 * architectures, which may be followed by UFS filesystems.
777 * The firmware will set the boot path to the partition it
778 * boots from ('f' in the sun4u case), but we want the kernel
779 * to be loaded from the cd9660 fs ('a'), so the boot path
780 * needs to be altered.
781 */
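	/*
	 * Editor's example (hypothetical device path): a firmware bootpath
	 * of "/pci@8,600000/scsi@4/disk@6,0:f" would be rewritten to
	 * "/pci@8,600000/scsi@4/disk@6,0:a" here.
	 */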
782 if (bootpath[strlen(bootpath) - 2] == ':' &&
783 bootpath[strlen(bootpath) - 1] == 'f') {
784 bootpath[strlen(bootpath) - 1] = 'a';
785 printf("Boot path set to %s\n", bootpath);
786 }
787
788 env_setenv("currdev", EV_VOLATILE, bootpath,
789 ofw_setcurrdev, env_nounset);
790 env_setenv("loaddev", EV_VOLATILE, bootpath,
791 env_noset, env_nounset);
792
793 printf("\n");
794 printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
795 printf("(%s, %s)\n", bootprog_maker, bootprog_date);
796 printf("bootpath=\"%s\"\n", bootpath);
797
798 /* Give control to the machine independent loader code. */
799 interact();
800 return (1);
801}
802
803COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);
804
805static int
806command_reboot(int argc, char *argv[])
807{
808 int i;
809
810 for (i = 0; devsw[i] != NULL; ++i)
811 if (devsw[i]->dv_cleanup != NULL)
812 (devsw[i]->dv_cleanup)();
813
814 printf("Rebooting...\n");
815 OF_exit();
816}
817
818/* provide this for panic, as it's not in the startup code */
819void
820exit(int code)
821{
822
823 OF_exit();
824}
825
826#ifdef LOADER_DEBUG
827static const char *const page_sizes[] = {
828 " 8k", " 64k", "512k", " 4m"
829};
830
831static void
832pmap_print_tte_sun4u(tte_t tag, tte_t tte)
833{
834
835 printf("%s %s ",
836 page_sizes[(tte >> TD_SIZE_SHIFT) & TD_SIZE_MASK],
837 tag & TD_G ? "G" : " ");
838 printf(tte & TD_W ? "W " : " ");
839 printf(tte & TD_P ? "\e[33mP\e[0m " : " ");
840 printf(tte & TD_E ? "E " : " ");
841 printf(tte & TD_CV ? "CV " : " ");
842 printf(tte & TD_CP ? "CP " : " ");
843 printf(tte & TD_L ? "\e[32mL\e[0m " : " ");
844 printf(tte & TD_IE ? "IE " : " ");
845 printf(tte & TD_NFO ? "NFO " : " ");
846 printf("pa=0x%lx va=0x%lx ctx=%ld\n",
847 TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
848}
849
850static void
851pmap_print_tlb_sun4u(void)
852{
853 tte_t tag, tte;
854 u_long pstate;
855 int i;
856
857 pstate = rdpr(pstate);
858 for (i = 0; i < itlb_slot_max; i++) {
859 wrpr(pstate, pstate & ~PSTATE_IE, 0);
860 tte = itlb_get_data_sun4u(tlb_locked, i);
861 wrpr(pstate, pstate, 0);
862 if (!(tte & TD_V))
863 continue;
864 tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
865 ASI_ITLB_TAG_READ_REG);
866 printf("iTLB-%2u: ", i);
867 pmap_print_tte_sun4u(tag, tte);
868 }
869 for (i = 0; i < dtlb_slot_max; i++) {
870 wrpr(pstate, pstate & ~PSTATE_IE, 0);
871 tte = dtlb_get_data_sun4u(tlb_locked, i);
872 wrpr(pstate, pstate, 0);
873 if (!(tte & TD_V))
874 continue;
875 tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
876 ASI_DTLB_TAG_READ_REG);
877 printf("dTLB-%2u: ", i);
878 pmap_print_tte_sun4u(tag, tte);
879 }
880}
881#endif