/*-
 * Initial implementation:
 * Copyright (c) 2001 Robert Drehmel
 * All rights reserved.
 *
 * As long as the above copyright statement and this notice remain
 * unchanged, you can do what ever you want with this file.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/boot/sparc64/loader/main.c 181398 2008-08-07 22:46:25Z marius $");
/*
 * FreeBSD/sparc64 kernel loader - machine dependent part
 *
 *  - implements copyin and readin functions that map kernel
 *    pages on demand.  The machine independent code does not
 *    know the size of the kernel early enough to pre-enter
 *    TTEs, and installing just one 4MB mapping seemed too
 *    limiting to me.
 */
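/*
 * Rough sketch of the expected call path (an outline, not a contract):
 * the MI loader invokes archsw.arch_readin()/archsw.arch_copyin(),
 * which main() below points at sparc64_readin()/sparc64_copyin();
 * those in turn call mmu_ops->mmu_mapin() to enter 4MB TTEs covering
 * [va, va + len) before the destination range is touched.
 */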

#include <stand.h>
#include <sys/exec.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/linker.h>
#include <sys/types.h>

#include <vm/vm.h>
#include <machine/asi.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/elf.h>
#include <machine/lsu.h>
#include <machine/metadata.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/upa.h>

#include "bootstrap.h"
#include "libofw.h"
#include "dev_net.h"

extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];

enum {
	HEAPVA		= 0x800000,
	HEAPSZ		= 0x1000000,
	LOADSZ		= 0x1000000	/* for kernel and modules */
};

static struct mmu_ops {
	void (*tlb_init)(void);
	int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
} *mmu_ops;

typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
    void *openfirmware);

static void dtlb_enter_sun4u(u_long vpn, u_long data);
static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
static void itlb_enter_sun4u(u_long vpn, u_long data);
static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
extern vm_offset_t md_load(char *, vm_offset_t *);
static int sparc64_autoload(void);
static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
static void sparc64_maphint(vm_offset_t, size_t);
static vm_offset_t claim_virt(vm_offset_t, size_t, int);
static vm_offset_t alloc_phys(size_t, int);
static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
static void release_phys(vm_offset_t, u_int);
static int __elfN(exec)(struct preloaded_file *);
static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
static int mmu_mapin_sun4v(vm_offset_t, vm_size_t);
static vm_offset_t init_heap(void);
static void tlb_init_sun4u(void);
static void tlb_init_sun4v(void);

#ifdef LOADER_DEBUG
typedef u_int64_t tte_t;

static void pmap_print_tlb_sun4u(void);
static void pmap_print_tte_sun4u(tte_t, tte_t);
#endif

static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };
static struct mmu_ops mmu_ops_sun4v = { tlb_init_sun4v, mmu_mapin_sun4v };

/* sun4u */
struct tlb_entry *dtlb_store;
struct tlb_entry *itlb_store;
int dtlb_slot;
int itlb_slot;
static int dtlb_slot_max;
static int itlb_slot_max;

/* sun4v */
static struct tlb_entry *tlb_store;
static int is_sun4v = 0;
/*
 * There is no direct TLB access on sun4v; we somewhat arbitrarily
 * declare enough slots to cover a 4GB address space with 4MB pages.
 */
#define	SUN4V_TLB_SLOT_MAX	(1 << 10)
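/*
 * A minimal compile-time sanity check (a sketch, assuming a 4GB address
 * space covered by 4MB pages): 4GB / 4MB = (1UL << 32) / (1UL << 22) =
 * 1 << 10 = 1024 slots.  The typedef below is otherwise unused and only
 * exists to fail compilation if SUN4V_TLB_SLOT_MAX and this arithmetic
 * ever disagree.
 */
typedef char sun4v_tlb_slot_check[(1UL << 32) / (1UL << 22) ==
    SUN4V_TLB_SLOT_MAX ? 1 : -1];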

static vm_offset_t curkva = 0;
static vm_offset_t heapva;

static phandle_t root;

/*
 * Machine dependent structures that the machine independent
 * loader part uses.
 */
struct devsw *devsw[] = {
#ifdef LOADER_DISK_SUPPORT
	&ofwdisk,
#endif
#ifdef LOADER_NET_SUPPORT
	&netdev,
#endif
	0
};
struct arch_switch archsw;

static struct file_format sparc64_elf = {
	__elfN(loadfile),
	__elfN(exec)
};
struct file_format *file_formats[] = {
	&sparc64_elf,
	0
};
struct fs_ops *file_system[] = {
#ifdef LOADER_UFS_SUPPORT
	&ufs_fsops,
#endif
#ifdef LOADER_CD9660_SUPPORT
	&cd9660_fsops,
#endif
#ifdef LOADER_ZIP_SUPPORT
	&zipfs_fsops,
#endif
#ifdef LOADER_GZIP_SUPPORT
	&gzipfs_fsops,
#endif
#ifdef LOADER_BZIP2_SUPPORT
	&bzipfs_fsops,
#endif
#ifdef LOADER_NFS_SUPPORT
	&nfs_fsops,
#endif
#ifdef LOADER_TFTP_SUPPORT
	&tftp_fsops,
#endif
	0
};
struct netif_driver *netif_drivers[] = {
#ifdef LOADER_NET_SUPPORT
	&ofwnet,
#endif
	0
};

extern struct console ofwconsole;
struct console *consoles[] = {
	&ofwconsole,
	0
};

#ifdef LOADER_DEBUG
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
	    (mask << LSU_PM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_phys_set(vm_offset_t pa, int sz)
{
	u_long off;

	off = (u_long)pa & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
}

static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
	    (mask << LSU_VM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_virt_set(vm_offset_t va, int sz)
{
	u_long off;

	off = (u_long)va & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
}
#endif

/*
 * archsw functions
 */
static int
sparc64_autoload(void)
{

	printf("nothing to autoload yet.\n");
	return (0);
}

static ssize_t
sparc64_readin(const int fd, vm_offset_t va, const size_t len)
{

	mmu_ops->mmu_mapin(va, len);
	return (read(fd, (void *)va, len));
}

static ssize_t
sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
{

	mmu_ops->mmu_mapin(dest, len);
	memcpy((void *)dest, src, len);
	return (len);
}

static void
sparc64_maphint(vm_offset_t va, size_t len)
{
	vm_paddr_t pa;
	vm_offset_t mva;
	size_t size;
	int i, free_excess = 0;

	if (!is_sun4v)
		return;

	if (tlb_store[va >> 22].te_pa != -1)
		return;

	/* round up to nearest 4MB page */
	size = (len + PAGE_MASK_4M) & ~PAGE_MASK_4M;
#if 0
	pa = alloc_phys(PAGE_SIZE_256M, PAGE_SIZE_256M);

	if (pa != -1)
		free_excess = 1;
	else
#endif
		pa = alloc_phys(size, PAGE_SIZE_256M);
	if (pa == -1)
		pa = alloc_phys(size, PAGE_SIZE_4M);
	if (pa == -1)
		panic("%s: out of memory", __func__);

	for (i = 0; i < size; i += PAGE_SIZE_4M) {
		mva = claim_virt(va + i, PAGE_SIZE_4M, 0);
		if (mva != (va + i))
			panic("%s: can't claim virtual page "
			    "(wanted %#lx, got %#lx)",
			    __func__, va + i, mva);

		tlb_store[mva >> 22].te_pa = pa + i;
		if (map_phys(-1, PAGE_SIZE_4M, mva, pa + i) != 0)
			printf("%s: can't map physical page\n", __func__);
	}
	if (free_excess)
		release_phys(pa, PAGE_SIZE_256M);
}

/*
 * other MD functions
 */
static vm_offset_t
claim_virt(vm_offset_t virt, size_t size, int align)
{
	vm_offset_t mva;

	if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
		return ((vm_offset_t)-1);
	return (mva);
}

static vm_offset_t
alloc_phys(size_t size, int align)
{
	cell_t phys_hi, phys_low;

	if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
	    &phys_hi) == -1)
		return ((vm_offset_t)-1);
	return ((vm_offset_t)phys_hi << 32 | phys_low);
}

static int
map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
{

	return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), virt, size, mode));
}

static void
release_phys(vm_offset_t phys, u_int size)
{

	(void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), size);
}

static int
__elfN(exec)(struct preloaded_file *fp)
{
	struct file_metadata *fmp;
	vm_offset_t mdp;
	Elf_Addr entry;
	Elf_Ehdr *e;
	int error;

	if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
		return (EFTYPE);
	e = (Elf_Ehdr *)&fmp->md_data;

	if ((error = md_load(fp->f_args, &mdp)) != 0)
		return (error);

	printf("jumping to kernel entry at %#lx.\n", e->e_entry);
#ifdef LOADER_DEBUG
	pmap_print_tlb_sun4u();
#endif

	entry = e->e_entry;

	OF_release((void *)heapva, HEAPSZ);

	((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);

	panic("%s: exec returned", __func__);
}

static vm_offset_t
dtlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long reg;
	int i;

	for (i = 0; i < dtlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG);
		return ((reg & TD_PA_SF_MASK) >> TD_PA_SHIFT);
	}
	return (-1);
}

static vm_offset_t
itlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long reg;
	int i;

	for (i = 0; i < itlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG);
		return ((reg & TD_PA_SF_MASK) >> TD_PA_SHIFT);
	}
	return (-1);
}

static void
itlb_enter_sun4u(u_long vpn, u_long data)
{
	u_long reg;

	reg = rdpr(pstate);
	wrpr(pstate, reg & ~PSTATE_IE, 0);
	stxa(AA_IMMU_TAR, ASI_IMMU, vpn);
	stxa(0, ASI_ITLB_DATA_IN_REG, data);
	membar(Sync);
	wrpr(pstate, reg, 0);
}

static void
dtlb_enter_sun4u(u_long vpn, u_long data)
{
	u_long reg;

	reg = rdpr(pstate);
	wrpr(pstate, reg & ~PSTATE_IE, 0);
	stxa(AA_DMMU_TAR, ASI_DMMU, vpn);
	stxa(0, ASI_DTLB_DATA_IN_REG, data);
	membar(Sync);
	wrpr(pstate, reg, 0);
}

static int
mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
{
	vm_offset_t pa, mva;
	u_long data;

	if (va + len > curkva)
		curkva = va + len;

	pa = (vm_offset_t)-1;
	len += va & PAGE_MASK_4M;
	va &= ~PAGE_MASK_4M;
	while (len) {
		if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
		    itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (vm_offset_t)-1) {
				pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (vm_offset_t)-1)
					panic("%s: out of memory", __func__);
				mva = claim_virt(va, PAGE_SIZE_4M, 0);
				if (mva != va)
					panic("%s: can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    __func__, va, mva);
				/* The mappings may have changed, be paranoid. */
				continue;
			}
			/*
			 * Actually, at most two slots fewer than
			 * {d,i}tlb_slot_max are usable (depending on the
			 * kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("%s: out of dtlb_slots", __func__);
			if (itlb_slot >= itlb_slot_max)
				panic("%s: out of itlb_slots", __func__);
			data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
			    TD_CV | TD_P | TD_W;
			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			dtlb_slot++;
			itlb_slot++;
			dtlb_enter_sun4u(va, data);
			itlb_enter_sun4u(va, data);
			pa = (vm_offset_t)-1;
		}
		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}
	if (pa != (vm_offset_t)-1)
		release_phys(pa, PAGE_SIZE_4M);
	return (0);
}

static int
mmu_mapin_sun4v(vm_offset_t va, vm_size_t len)
{
	vm_offset_t pa, mva;

	if (va + len > curkva)
		curkva = va + len;

	pa = (vm_offset_t)-1;
	len += va & PAGE_MASK_4M;
	va &= ~PAGE_MASK_4M;
	while (len) {
		if ((va >> 22) >= SUN4V_TLB_SLOT_MAX)
			panic("%s: trying to map more than 4GB", __func__);
		if (tlb_store[va >> 22].te_pa == -1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (vm_offset_t)-1) {
				pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (vm_offset_t)-1)
					panic("%s: out of memory", __func__);
				mva = claim_virt(va, PAGE_SIZE_4M, 0);
				if (mva != va)
					panic("%s: can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    __func__, va, mva);
			}

			tlb_store[va >> 22].te_pa = pa;
			if (map_phys(-1, PAGE_SIZE_4M, va, pa) == -1)
				printf("%s: can't map physical page\n",
				    __func__);
			pa = (vm_offset_t)-1;
		}
		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}
	if (pa != (vm_offset_t)-1)
		release_phys(pa, PAGE_SIZE_4M);
	return (0);
}

static vm_offset_t
init_heap(void)
{

	/* There is no need for contiguous physical heap memory. */
	heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
	return (heapva);
}

static void
tlb_init_sun4u(void)
{
	phandle_t child;
	char buf[128];
	u_int bootcpu;
	u_int cpu;

	bootcpu = UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG));
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (child == -1)
			panic("%s: can't get child phandle", __func__);
		if (OF_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (OF_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && OF_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("%s: can't get portid", __func__);
			if (cpu == bootcpu)
				break;
		}
	}
	if (cpu != bootcpu)
		panic("%s: no node for bootcpu?!?!", __func__);
	if (OF_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    OF_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("%s: can't get TLB slot max.", __func__);
	dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL)
		panic("%s: can't allocate TLB store", __func__);
}

static void
tlb_init_sun4v(void)
{

	tlb_store = malloc(SUN4V_TLB_SLOT_MAX * sizeof(*tlb_store));
	memset(tlb_store, 0xFF, SUN4V_TLB_SLOT_MAX * sizeof(*tlb_store));
}

int
main(int (*openfirm)(void *))
{
	char bootpath[64];
	char compatible[32];
	struct devsw **dp;

	/*
	 * Tell the Open Firmware functions where to find the OFW gate.
	 */
	OF_init(openfirm);

	archsw.arch_getdev = ofw_getdev;
	archsw.arch_copyin = sparc64_copyin;
	archsw.arch_copyout = ofw_copyout;
	archsw.arch_readin = sparc64_readin;
	archsw.arch_autoload = sparc64_autoload;
	archsw.arch_maphint = sparc64_maphint;

	init_heap();
	setheap((void *)heapva, (void *)(heapva + HEAPSZ));

	/*
	 * Probe for a console.
	 */
	cons_probe();

	if ((root = OF_peer(0)) == -1)
		panic("%s: can't get root phandle", __func__);
	OF_getprop(root, "compatible", compatible, sizeof(compatible));
	if (!strcmp(compatible, "sun4v")) {
		printf("\nBooting with sun4v support.\n");
		mmu_ops = &mmu_ops_sun4v;
		is_sun4v = 1;
	} else {
		printf("\nBooting with sun4u support.\n");
		mmu_ops = &mmu_ops_sun4u;
	}

	mmu_ops->tlb_init();

	/*
	 * Initialize devices.
	 */
	for (dp = devsw; *dp != 0; dp++) {
		if ((*dp)->dv_init != 0)
			(*dp)->dv_init();
	}

	/*
	 * Set up the current device.
	 */
	OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));

	/*
	 * Sun-compatible bootable CD-ROMs have a disk label placed
	 * before the cd9660 data, with the actual filesystem in the
	 * first partition, while the other partitions contain pseudo
	 * disk labels with embedded boot blocks for different
	 * architectures, which may be followed by UFS filesystems.
	 * The firmware will set the boot path to the partition it
	 * boots from ('f' in the sun4u case), but we want the kernel
	 * to be loaded from the cd9660 fs ('a'), so the boot path
	 * needs to be altered; see the example below.
	 */
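	/*
	 * For example (a hypothetical device path, used for illustration
	 * only): a boot path of "/pci@1f,0/ide@d/cdrom@0,0:f" would be
	 * rewritten to "/pci@1f,0/ide@d/cdrom@0,0:a" by the code below.
	 */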
	if (bootpath[strlen(bootpath) - 2] == ':' &&
	    bootpath[strlen(bootpath) - 1] == 'f') {
		bootpath[strlen(bootpath) - 1] = 'a';
		printf("Boot path set to %s\n", bootpath);
	}

	env_setenv("currdev", EV_VOLATILE, bootpath,
	    ofw_setcurrdev, env_nounset);
	env_setenv("loaddev", EV_VOLATILE, bootpath,
	    env_noset, env_nounset);

	printf("\n");
	printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
	printf("(%s, %s)\n", bootprog_maker, bootprog_date);
	printf("bootpath=\"%s\"\n", bootpath);

	/* Give control to the machine independent loader code. */
	interact();
	return (1);
}

COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);

static int
command_reboot(int argc, char *argv[])
{
	int i;

	for (i = 0; devsw[i] != NULL; ++i)
		if (devsw[i]->dv_cleanup != NULL)
			(devsw[i]->dv_cleanup)();

	printf("Rebooting...\n");
	OF_exit();
}

/* provide this for panic, as it's not in the startup code */
void
exit(int code)
{

	OF_exit();
}

#ifdef LOADER_DEBUG
static const char *page_sizes[] = {
	"  8k", " 64k", "512k", "  4m"
};

static void
pmap_print_tte_sun4u(tte_t tag, tte_t tte)
{

	printf("%s %s ",
	    page_sizes[(tte & TD_SIZE_MASK) >> TD_SIZE_SHIFT],
	    tag & TD_G ? "G" : " ");
	printf(tte & TD_W ? "W " : "  ");
	printf(tte & TD_P ? "\e[33mP\e[0m " : "  ");
	printf(tte & TD_E ? "E " : "  ");
	printf(tte & TD_CV ? "CV " : "   ");
	printf(tte & TD_CP ? "CP " : "   ");
	printf(tte & TD_L ? "\e[32mL\e[0m " : "  ");
	printf(tte & TD_IE ? "IE " : "   ");
	printf(tte & TD_NFO ? "NFO " : "    ");
	printf("pa=0x%lx va=0x%lx ctx=%ld\n",
	    TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
}

static void
pmap_print_tlb_sun4u(void)
{
	tte_t tag, tte;
	int i;

	for (i = 0; i < itlb_slot_max; i++) {
		tte = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
		printf("iTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
	for (i = 0; i < dtlb_slot_max; i++) {
		tte = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
		printf("dTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
}
#endif
