main.c revision 332134
/*-
 * Initial implementation:
 * Copyright (c) 2001 Robert Drehmel
 * All rights reserved.
 *
 * As long as the above copyright statement and this notice remain
 * unchanged, you can do what ever you want with this file.
 */
/*-
 * Copyright (c) 2008 - 2012 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/stand/sparc64/loader/main.c 332134 2018-04-06 19:11:58Z kevans $");

/*
 * FreeBSD/sparc64 kernel loader - machine dependent part
 *
 *  - implements copyin and readin functions that map kernel
 *    pages on demand.  The machine independent code does not
 *    know the size of the kernel early enough to pre-enter
 *    TTEs, and installing just one 4MB mapping seemed too
 *    limiting to me.
 */

#include <stand.h>
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <sys/queue.h>
#include <sys/types.h>
#ifdef LOADER_ZFS_SUPPORT
#include <sys/vtoc.h>
#include "../zfs/libzfs.h"
#endif

#include <vm/vm.h>
#include <machine/asi.h>
#include <machine/cmt.h>
#include <machine/cpufunc.h>
#include <machine/elf.h>
#include <machine/fireplane.h>
#include <machine/jbus.h>
#include <machine/lsu.h>
#include <machine/metadata.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/upa.h>
#include <machine/ver.h>
#include <machine/vmparam.h>

#include "bootstrap.h"
#include "libofw.h"
#include "dev_net.h"

extern char bootprog_info[];

enum {
	HEAPVA		= 0x800000,
	HEAPSZ		= 0x1000000,
	LOADSZ		= 0x1000000	/* for kernel and modules */
};

/*
 * At least the Sun Fire V1280 requires page-sized allocations to be
 * claimed.
 */
CTASSERT(HEAPSZ % PAGE_SIZE == 0);

static struct mmu_ops {
	void (*tlb_init)(void);
	int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
} *mmu_ops;

typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
    void *openfirmware);

static inline u_long dtlb_get_data_sun4u(u_int, u_int);
static int dtlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
static inline u_long itlb_get_data_sun4u(u_int, u_int);
static int itlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
static void itlb_relocate_locked0_sun4u(void);
static int sparc64_autoload(void);
static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
static vm_offset_t claim_virt(vm_offset_t, size_t, int);
static vm_offset_t alloc_phys(size_t, int);
static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
static void release_phys(vm_offset_t, u_int);
static int __elfN(exec)(struct preloaded_file *);
static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
static vm_offset_t init_heap(void);
static phandle_t find_bsp_sun4u(phandle_t, uint32_t);
const char *cpu_cpuid_prop_sun4u(void);
uint32_t cpu_get_mid_sun4u(void);
static void tlb_init_sun4u(void);

#ifdef LOADER_DEBUG
typedef u_int64_t tte_t;

static void pmap_print_tlb_sun4u(void);
static void pmap_print_tte_sun4u(tte_t, tte_t);
#endif

static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };

/* sun4u */
struct tlb_entry *dtlb_store;
struct tlb_entry *itlb_store;
u_int dtlb_slot;
u_int itlb_slot;
static int cpu_impl;
static u_int dtlb_slot_max;
static u_int itlb_slot_max;
static u_int tlb_locked;

static vm_offset_t curkva = 0;
static vm_offset_t heapva;

static char bootpath[64];
static phandle_t root;

#ifdef LOADER_ZFS_SUPPORT
static struct zfs_devdesc zfs_currdev;
#endif

/*
 * Machine dependent structures that the machine independent
 * loader part uses.
 */
struct devsw *devsw[] = {
#ifdef LOADER_DISK_SUPPORT
	&ofwdisk,
#endif
#ifdef LOADER_NET_SUPPORT
	&netdev,
#endif
#ifdef LOADER_ZFS_SUPPORT
	&zfs_dev,
#endif
	NULL
};

struct arch_switch archsw;

static struct file_format sparc64_elf = {
	__elfN(loadfile),
	__elfN(exec)
};

struct file_format *file_formats[] = {
	&sparc64_elf,
	NULL
};

struct fs_ops *file_system[] = {
#ifdef LOADER_ZFS_SUPPORT
	&zfs_fsops,
#endif
#ifdef LOADER_UFS_SUPPORT
	&ufs_fsops,
#endif
#ifdef LOADER_CD9660_SUPPORT
	&cd9660_fsops,
#endif
#ifdef LOADER_ZIP_SUPPORT
	&zipfs_fsops,
#endif
#ifdef LOADER_GZIP_SUPPORT
	&gzipfs_fsops,
#endif
#ifdef LOADER_BZIP2_SUPPORT
	&bzipfs_fsops,
#endif
#ifdef LOADER_NFS_SUPPORT
	&nfs_fsops,
#endif
#ifdef LOADER_TFTP_SUPPORT
	&tftp_fsops,
#endif
	NULL
};

struct netif_driver *netif_drivers[] = {
#ifdef LOADER_NET_SUPPORT
	&ofwnet,
#endif
	NULL
};

extern struct console ofwconsole;
struct console *consoles[] = {
	&ofwconsole,
	NULL
};

#ifdef LOADER_DEBUG
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
	    (mask << LSU_PM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_phys_set(vm_offset_t pa, int sz)
{
	u_long off;

	off = (u_long)pa & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
}

static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
	    (mask << LSU_VM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_virt_set(vm_offset_t va, int sz)
{
	u_long off;

	off = (u_long)va & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
}
#endif

/*
 * archsw functions
 */
static int
sparc64_autoload(void)
{

	return (0);
}

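/*
 * readin and copyin are hooked into archsw in main(); both make sure
 * the destination range is mapped (via mmu_ops->mmu_mapin) before the
 * transfer, implementing the on-demand kernel mappings described in
 * the header comment above.
 */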
static ssize_t
sparc64_readin(const int fd, vm_offset_t va, const size_t len)
{

	mmu_ops->mmu_mapin(va, len);
	return (read(fd, (void *)va, len));
}

static ssize_t
sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
{

	mmu_ops->mmu_mapin(dest, len);
	memcpy((void *)dest, src, len);
	return (len);
}

/*
 * other MD functions
 */
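/*
 * Thin wrappers around the OFW client interface: claim_virt and
 * alloc_phys claim virtual and physical memory from the firmware,
 * map_phys enters a mapping and release_phys returns physical memory.
 * Physical addresses are passed to the firmware split into 32-bit
 * hi/lo cells.
 */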
static vm_offset_t
claim_virt(vm_offset_t virt, size_t size, int align)
{
	vm_offset_t mva;

	if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
		return ((vm_offset_t)-1);
	return (mva);
}

static vm_offset_t
alloc_phys(size_t size, int align)
{
	cell_t phys_hi, phys_low;

	if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
	    &phys_hi) == -1)
		return ((vm_offset_t)-1);
	return ((vm_offset_t)phys_hi << 32 | phys_low);
}

static int
map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
{

	return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), virt, size, mode));
}

static void
release_phys(vm_offset_t phys, u_int size)
{

	(void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), size);
}

static int
__elfN(exec)(struct preloaded_file *fp)
{
	struct file_metadata *fmp;
	vm_offset_t mdp, dtbp;
	Elf_Addr entry;
	Elf_Ehdr *e;
	int error;

	if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
		return (EFTYPE);
	e = (Elf_Ehdr *)&fmp->md_data;

	if ((error = md_load(fp->f_args, &mdp, &dtbp)) != 0)
		return (error);

	printf("jumping to kernel entry at %#lx.\n", e->e_entry);
#ifdef LOADER_DEBUG
	pmap_print_tlb_sun4u();
#endif

	dev_cleanup();

	entry = e->e_entry;

	OF_release((void *)heapva, HEAPSZ);

	((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);

	panic("%s: exec returned", __func__);
}

static inline u_long
dtlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

static inline u_long
itlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

static vm_offset_t
dtlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	u_int i, tlb;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < dtlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static vm_offset_t
itlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	int i;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < itlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static int
dtlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	return (OF_call_method("SUNW,dtlb-load", mmu, 3, 0, index, data,
	    virt));
}

static int
itlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp && index == 0 &&
	    (data & TD_L) != 0)
		panic("%s: won't enter locked TLB entry at index 0 on USIII+",
		    __func__);
	return (OF_call_method("SUNW,itlb-load", mmu, 3, 0, index, data,
	    virt));
}

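/*
 * Work around Cheetah+ erratum 34: a locked iTLB entry must not sit in
 * slot 0 on USIII+.  If the firmware left one there, move its tag and
 * data to the first free slot and demap the old translation.
 */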
static void
itlb_relocate_locked0_sun4u(void)
{
	u_long data, pstate, tag;
	int i;

	if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
		return;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);

	data = itlb_get_data_sun4u(tlb_locked, 0);
	if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
		wrpr(pstate, pstate, 0);
		return;
	}

	/* Flush the mapping of slot 0. */
	tag = ldxa(TLB_DAR_SLOT(tlb_locked, 0), ASI_ITLB_TAG_READ_REG);
	stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
	    TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
	flush(0);	/* The USIII-family ignores the address. */

	/*
	 * Search for a replacement slot != 0 and enter the data and tag
	 * that formerly were in slot 0.
	 */
	for (i = 1; i < itlb_slot_max; i++) {
		if ((itlb_get_data_sun4u(tlb_locked, i) & TD_V) != 0)
			continue;

		stxa(AA_IMMU_TAR, ASI_IMMU, tag);
		stxa(TLB_DAR_SLOT(tlb_locked, i), ASI_ITLB_DATA_ACCESS_REG,
		    data);
		flush(0);	/* The USIII-family ignores the address. */
		break;
	}
	wrpr(pstate, pstate, 0);
	if (i == itlb_slot_max)
		panic("%s: could not find a replacement slot", __func__);
}

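/*
 * Map the range [va, va + len) with locked 4MB pages.  For each 4MB
 * page that is not yet present in the dTLB or iTLB, a physical page is
 * allocated and the virtual page claimed from the firmware, and locked
 * TTEs are entered into both TLBs, filling slots from the top down.
 * A surplus physical page is released again at the end.
 */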
static int
mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
{
	vm_offset_t pa, mva;
	u_long data;
	u_int index;

	if (va + len > curkva)
		curkva = va + len;

	pa = (vm_offset_t)-1;
	len += va & PAGE_MASK_4M;
	va &= ~PAGE_MASK_4M;
	while (len) {
		if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
		    itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
			/* Allocate a physical page, claim the virtual area. */
			if (pa == (vm_offset_t)-1) {
				pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (vm_offset_t)-1)
					panic("%s: out of memory", __func__);
				mva = claim_virt(va, PAGE_SIZE_4M, 0);
				if (mva != va)
					panic("%s: can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    __func__, va, mva);
				/*
				 * The mappings may have changed, be paranoid.
				 */
				continue;
			}
			/*
			 * In practice at most two slots fewer than the
			 * maximum are usable here (depending on the
			 * kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("%s: out of dtlb_slots", __func__);
			if (itlb_slot >= itlb_slot_max)
				panic("%s: out of itlb_slots", __func__);
			data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
			    TD_CV | TD_P | TD_W;
			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			index = dtlb_slot_max - dtlb_slot - 1;
			if (dtlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter dTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			dtlb_slot++;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			index = itlb_slot_max - itlb_slot - 1;
			if (itlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter iTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			itlb_slot++;
			pa = (vm_offset_t)-1;
		}
		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}
	if (pa != (vm_offset_t)-1)
		release_phys(pa, PAGE_SIZE_4M);
	return (0);
}

static vm_offset_t
init_heap(void)
{

	/* There is no need for contiguous physical heap memory. */
	heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
	return (heapva);
}

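/*
 * Recursively walk the OFW device tree below the given node looking
 * for the "cpu" node whose cpuid property (see cpu_cpuid_prop_sun4u())
 * matches the MID of the boot processor.
 */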
static phandle_t
find_bsp_sun4u(phandle_t node, uint32_t bspid)
{
	char type[sizeof("cpu")];
	phandle_t child;
	uint32_t cpuid;

	for (; node > 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0) {
			child = find_bsp_sun4u(child, bspid);
			if (child > 0)
				return (child);
		} else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, cpu_cpuid_prop_sun4u(), &cpuid,
			    sizeof(cpuid)) <= 0)
				continue;
			if (cpuid == bspid)
				return (node);
		}
	}
	return (0);
}

const char *
cpu_cpuid_prop_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return ("upa-portid");
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return ("portid");
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return ("cpuid");
	default:
		return ("");
	}
}

uint32_t
cpu_get_mid_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}

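/*
 * Determine the CPU implementation, which TLB holds its locked
 * entries, and the maximum number of locked slots (#dtlb-entries and
 * #itlb-entries of the boot CPU node), and allocate the shadow stores
 * that mmu_mapin_sun4u() uses to record the mappings it enters.
 */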
static void
tlb_init_sun4u(void)
{
	phandle_t bsp;

	cpu_impl = VER_IMPL(rdpr(ver));
	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		tlb_locked = TLB_DAR_T32;
		break;
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		tlb_locked = TLB_DAR_T16;
		break;
	case CPU_IMPL_SPARC64V:
		tlb_locked = TLB_DAR_FTLB;
		break;
	}
	bsp = find_bsp_sun4u(OF_child(root), cpu_get_mid_sun4u());
	if (bsp == 0)
		panic("%s: no node for bootcpu?!?!", __func__);

	if (OF_getprop(bsp, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    OF_getprop(bsp, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("%s: can't get TLB slot max.", __func__);

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
#ifdef LOADER_DEBUG
		printf("pre fixup:\n");
		pmap_print_tlb_sun4u();
#endif

		/*
		 * Relocate the locked entry in it16 slot 0 (if present)
		 * as part of working around Cheetah+ erratum 34.
		 */
		itlb_relocate_locked0_sun4u();

#ifdef LOADER_DEBUG
		printf("post fixup:\n");
		pmap_print_tlb_sun4u();
#endif
	}

	dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL)
		panic("%s: can't allocate TLB store", __func__);
}

#ifdef LOADER_ZFS_SUPPORT

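/*
 * Look for ZFS pools: walk the disks named in the boot-device OFW
 * variable, read the VTOC8 label from each and probe every
 * freebsd-zfs slice, remembering the GUID of the pool found on the
 * boot device so it can later be used as currdev.
 */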
static void
sparc64_zfs_probe(void)
{
	struct vtoc8 vtoc;
	char alias[64], devname[sizeof(alias) + sizeof(":x") - 1];
	char type[sizeof("device_type")];
	char *bdev, *dev, *odev;
	uint64_t guid, *guidp;
	int fd, len, part;
	phandle_t aliases, options;

	guid = 0;

	/*
	 * Get the GUIDs of the ZFS pools on any additional disks listed in
	 * the boot-device environment variable.
	 */
	if ((aliases = OF_finddevice("/aliases")) == -1)
		goto out;
	options = OF_finddevice("/options");
	len = OF_getproplen(options, "boot-device");
	if (len <= 0)
		goto out;
	bdev = odev = malloc(len + 1);
	if (bdev == NULL)
		goto out;
	if (OF_getprop(options, "boot-device", bdev, len) <= 0)
		goto out;
	bdev[len] = '\0';
	while ((dev = strsep(&bdev, " ")) != NULL) {
		if (*dev == '\0')
			continue;
		strcpy(alias, dev);
		(void)OF_getprop(aliases, dev, alias, sizeof(alias));
		if (OF_getprop(OF_finddevice(alias), "device_type", type,
		    sizeof(type)) == -1)
			continue;
		if (strcmp(type, "block") != 0)
			continue;

		/* Find freebsd-zfs slices in the VTOC. */
		fd = open(alias, O_RDONLY);
		if (fd == -1)
			continue;
		lseek(fd, 0, SEEK_SET);
		if (read(fd, &vtoc, sizeof(vtoc)) != sizeof(vtoc)) {
			close(fd);
			continue;
		}
		close(fd);

		for (part = 0; part < 8; part++) {
			if (part == 2 || vtoc.part[part].tag !=
			    VTOC_TAG_FREEBSD_ZFS)
				continue;
			(void)sprintf(devname, "%s:%c", alias, part + 'a');
			/* Get the GUID of the ZFS pool on the boot device. */
			if (strcmp(devname, bootpath) == 0)
				guidp = &guid;
			else
				guidp = NULL;
			if (zfs_probe_dev(devname, guidp) == ENXIO)
				break;
		}
	}
	free(odev);

 out:
	if (guid != 0) {
		zfs_currdev.pool_guid = guid;
		zfs_currdev.root_guid = 0;
		zfs_currdev.d_dev = &zfs_dev;
		zfs_currdev.d_type = zfs_currdev.d_dev->dv_type;
	}
}
#endif /* LOADER_ZFS_SUPPORT */

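/*
 * Loader entry point: set up the OFW entry vector and archsw hooks,
 * claim the heap, probe the console, initialize the MMU ops and
 * devices, derive currdev/loaddev from the firmware boot path and
 * drop into the machine independent interpreter.
 */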
int
main(int (*openfirm)(void *))
{
	char compatible[32];
	struct devsw **dp;

	/*
	 * Tell the Open Firmware functions where to find the OFW gate.
	 */
	OF_init(openfirm);

	archsw.arch_getdev = ofw_getdev;
	archsw.arch_copyin = sparc64_copyin;
	archsw.arch_copyout = ofw_copyout;
	archsw.arch_readin = sparc64_readin;
	archsw.arch_autoload = sparc64_autoload;
#ifdef LOADER_ZFS_SUPPORT
	archsw.arch_zfs_probe = sparc64_zfs_probe;
#endif

	if (init_heap() == (vm_offset_t)-1)
		OF_exit();
	setheap((void *)heapva, (void *)(heapva + HEAPSZ));

	/*
	 * Probe for a console.
	 */
	cons_probe();

	if ((root = OF_peer(0)) == -1)
		panic("%s: can't get root phandle", __func__);
	OF_getprop(root, "compatible", compatible, sizeof(compatible));
	mmu_ops = &mmu_ops_sun4u;

	mmu_ops->tlb_init();

	/*
	 * Set up the current device.
	 */
	OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));

	/*
	 * Initialize devices.
	 */
	for (dp = devsw; *dp != NULL; dp++)
		if ((*dp)->dv_init != 0)
			(*dp)->dv_init();

#ifdef LOADER_ZFS_SUPPORT
	if (zfs_currdev.pool_guid != 0) {
		(void)strncpy(bootpath, zfs_fmtdev(&zfs_currdev),
		    sizeof(bootpath) - 1);
		bootpath[sizeof(bootpath) - 1] = '\0';
	} else
#endif

	/*
	 * Sun compatible bootable CD-ROMs have a disk label placed before
	 * the ISO 9660 data, with the actual file system being in the first
	 * partition, while the other partitions contain pseudo disk labels
	 * with embedded boot blocks for different architectures, which may
	 * be followed by UFS file systems.
	 * The firmware will set the boot path to the partition it boots from
	 * ('f' in the sun4u/sun4v case), but we want the kernel to be loaded
	 * from the ISO 9660 file system ('a'), so the boot path needs to be
	 * altered.
	 */
	if (bootpath[strlen(bootpath) - 2] == ':' &&
	    bootpath[strlen(bootpath) - 1] == 'f')
		bootpath[strlen(bootpath) - 1] = 'a';

	env_setenv("currdev", EV_VOLATILE, bootpath,
	    ofw_setcurrdev, env_nounset);
	env_setenv("loaddev", EV_VOLATILE, bootpath,
	    env_noset, env_nounset);

	printf("\n%s", bootprog_info);
	printf("bootpath=\"%s\"\n", bootpath);

	/* Give control to the machine independent loader code. */
	interact();
	return (1);
}

COMMAND_SET(heap, "heap", "show heap usage", command_heap);

static int
command_heap(int argc, char *argv[])
{

	mallocstats();
	printf("heap base at %p, top at %p, upper limit at %p\n", heapva,
	    sbrk(0), heapva + HEAPSZ);
	return (CMD_OK);
}

COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);

static int
command_reboot(int argc, char *argv[])
{
	int i;

	for (i = 0; devsw[i] != NULL; ++i)
		if (devsw[i]->dv_cleanup != NULL)
			(devsw[i]->dv_cleanup)();

	printf("Rebooting...\n");
	OF_exit();
}

/* provide this for panic, as it's not in the startup code */
void
exit(int code)
{

	OF_exit();
}

#ifdef LOADER_DEBUG
static const char *const page_sizes[] = {
	"  8k", " 64k", "512k", "  4m"
};

static void
pmap_print_tte_sun4u(tte_t tag, tte_t tte)
{

	printf("%s %s ",
	    page_sizes[(tte >> TD_SIZE_SHIFT) & TD_SIZE_MASK],
	    tag & TD_G ? "G" : " ");
	printf(tte & TD_W ? "W " : "  ");
	printf(tte & TD_P ? "\e[33mP\e[0m " : "  ");
	printf(tte & TD_E ? "E " : "  ");
	printf(tte & TD_CV ? "CV " : "   ");
	printf(tte & TD_CP ? "CP " : "   ");
	printf(tte & TD_L ? "\e[32mL\e[0m " : "  ");
	printf(tte & TD_IE ? "IE " : "   ");
	printf(tte & TD_NFO ? "NFO " : "    ");
	printf("pa=0x%lx va=0x%lx ctx=%ld\n",
	    TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
}

static void
pmap_print_tlb_sun4u(void)
{
	tte_t tag, tte;
	u_long pstate;
	int i;

	pstate = rdpr(pstate);
	for (i = 0; i < itlb_slot_max; i++) {
		wrpr(pstate, pstate & ~PSTATE_IE, 0);
		tte = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		printf("iTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
	for (i = 0; i < dtlb_slot_max; i++) {
		wrpr(pstate, pstate & ~PSTATE_IE, 0);
		tte = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		printf("dTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
}
#endif