main.c (183628) main.c (188455)
1/*-
2 * Initial implementation:
3 * Copyright (c) 2001 Robert Drehmel
4 * All rights reserved.
5 *
6 * As long as the above copyright statement and this notice remain
7 * unchanged, you can do what ever you want with this file.
8 */
9/*-
10 * Copyright (c) 2008 Marius Strobl <marius@FreeBSD.org>
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/boot/sparc64/loader/main.c 183628 2008-10-05 14:00:44Z marius $");
36__FBSDID("$FreeBSD: head/sys/boot/sparc64/loader/main.c 188455 2009-02-10 21:48:42Z marius $");
37
38/*
39 * FreeBSD/sparc64 kernel loader - machine dependent part
40 *
41 * - implements copyin and readin functions that map kernel
42 * pages on demand. The machine independent code does not
43 * know the size of the kernel early enough to pre-enter
44 * TTEs, and installing just one 4MB mapping seemed too
45 * limiting to me.
46 */
47
48#include <stand.h>
49#include <sys/exec.h>
50#include <sys/param.h>
51#include <sys/queue.h>
52#include <sys/linker.h>
53#include <sys/types.h>
54
55#include <vm/vm.h>
56#include <machine/asi.h>
57#include <machine/cpufunc.h>
58#include <machine/elf.h>
59#include <machine/lsu.h>
60#include <machine/metadata.h>
61#include <machine/tte.h>
62#include <machine/tlb.h>
63#include <machine/upa.h>
64#include <machine/ver.h>
65#include <machine/vmparam.h>
66
67#include "bootstrap.h"
68#include "libofw.h"
69#include "dev_net.h"
70
71extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
72
73enum {
74 HEAPVA = 0x800000,
75 HEAPSZ = 0x1000000,
76 LOADSZ = 0x1000000 /* for kernel and modules */
77};
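/*
 * For reference: 0x1000000 bytes is 16MB, so both the heap and the
 * kernel/module load area are sized at 16MB; the heap itself is
 * claimed at HEAPVA (0x800000) in init_heap() below.
 */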
78
79static struct mmu_ops {
80 void (*tlb_init)(void);
81 int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
82} *mmu_ops;
83
84typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
85 void *openfirmware);
86
87static inline u_long dtlb_get_data_sun4u(int slot);
88static void dtlb_enter_sun4u(u_long vpn, u_long data);
89static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
90static inline u_long itlb_get_data_sun4u(int slot);
91static void itlb_enter_sun4u(u_long vpn, u_long data);
92static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
93static void itlb_relocate_locked0_sun4u(void);
94extern vm_offset_t md_load(char *, vm_offset_t *);
95static int sparc64_autoload(void);
96static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
97static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
98static void sparc64_maphint(vm_offset_t, size_t);
99static vm_offset_t claim_virt(vm_offset_t, size_t, int);
100static vm_offset_t alloc_phys(size_t, int);
101static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
102static void release_phys(vm_offset_t, u_int);
103static int __elfN(exec)(struct preloaded_file *);
104static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
105static int mmu_mapin_sun4v(vm_offset_t, vm_size_t);
106static vm_offset_t init_heap(void);
107static void tlb_init_sun4u(void);
108static void tlb_init_sun4v(void);
109
110#ifdef LOADER_DEBUG
111typedef u_int64_t tte_t;
112
113static void pmap_print_tlb_sun4u(void);
114static void pmap_print_tte_sun4u(tte_t, tte_t);
115#endif
116
117static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };
118static struct mmu_ops mmu_ops_sun4v = { tlb_init_sun4v, mmu_mapin_sun4v };
119
120/* sun4u */
121struct tlb_entry *dtlb_store;
122struct tlb_entry *itlb_store;
123int dtlb_slot;
124int itlb_slot;
125int cpu_impl;
126static int dtlb_slot_max;
127static int itlb_slot_max;
128
129/* sun4v */
130static struct tlb_entry *tlb_store;
131static int is_sun4v = 0;
132/*
133 * no direct TLB access on sun4v
134 * we somewhat arbitrarily declare enough
135 * slots to cover a 4GB AS with 4MB pages
136 */
137#define SUN4V_TLB_SLOT_MAX (1 << 10)
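/*
 * 1 << 10 == 1024 slots, i.e. a 4GB address space divided into 4MB
 * pages (2^32 / 2^22).  The slot for a given VA is accordingly looked
 * up as va >> 22 in the sun4v code below.
 */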
138
139static vm_offset_t curkva = 0;
140static vm_offset_t heapva;
141
142static phandle_t root;
143
144/*
145 * Machine dependent structures that the machine independent
146 * loader part uses.
147 */
148struct devsw *devsw[] = {
149#ifdef LOADER_DISK_SUPPORT
150 &ofwdisk,
151#endif
152#ifdef LOADER_NET_SUPPORT
153 &netdev,
154#endif
155 0
156};
157struct arch_switch archsw;
158
159static struct file_format sparc64_elf = {
160 __elfN(loadfile),
161 __elfN(exec)
162};
163struct file_format *file_formats[] = {
164 &sparc64_elf,
165 0
166};
167struct fs_ops *file_system[] = {
168#ifdef LOADER_UFS_SUPPORT
169 &ufs_fsops,
170#endif
171#ifdef LOADER_CD9660_SUPPORT
172 &cd9660_fsops,
173#endif
174#ifdef LOADER_ZIP_SUPPORT
175 &zipfs_fsops,
176#endif
177#ifdef LOADER_GZIP_SUPPORT
178 &gzipfs_fsops,
179#endif
180#ifdef LOADER_BZIP2_SUPPORT
181 &bzipfs_fsops,
182#endif
183#ifdef LOADER_NFS_SUPPORT
184 &nfs_fsops,
185#endif
186#ifdef LOADER_TFTP_SUPPORT
187 &tftp_fsops,
188#endif
189 0
190};
191struct netif_driver *netif_drivers[] = {
192#ifdef LOADER_NET_SUPPORT
193 &ofwnet,
194#endif
195 0
196};
197
198extern struct console ofwconsole;
199struct console *consoles[] = {
200 &ofwconsole,
201 0
202};
203
204#ifdef LOADER_DEBUG
205static int
206watch_phys_set_mask(vm_offset_t pa, u_long mask)
207{
208 u_long lsucr;
209
210 stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
211 lsucr = ldxa(0, ASI_LSU_CTL_REG);
212 lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
213 (mask << LSU_PM_SHIFT);
214 stxa(0, ASI_LSU_CTL_REG, lsucr);
215 return (0);
216}
217
218static int
219watch_phys_set(vm_offset_t pa, int sz)
220{
221 u_long off;
222
223 off = (u_long)pa & 7;
224 /* Test for misaligned watch points. */
225 if (off + sz > 8)
226 return (-1);
227 return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
228}
229
230
231static int
232watch_virt_set_mask(vm_offset_t va, u_long mask)
233{
234 u_long lsucr;
235
236 stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
237 lsucr = ldxa(0, ASI_LSU_CTL_REG);
238 lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
239 (mask << LSU_VM_SHIFT);
240 stxa(0, ASI_LSU_CTL_REG, lsucr);
241 return (0);
242}
243
244static int
245watch_virt_set(vm_offset_t va, int sz)
246{
247 u_long off;
248
249 off = (u_long)va & 7;
250 /* Test for misaligned watch points. */
251 if (off + sz > 8)
252 return (-1);
253 return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
254}
255#endif
256
257/*
258 * archsw functions
259 */
260static int
261sparc64_autoload(void)
262{
263
264 setenv("hw.ata.atapi_dma", "0", 0);
265 return (0);
266}
267
268static ssize_t
269sparc64_readin(const int fd, vm_offset_t va, const size_t len)
270{
271
272 mmu_ops->mmu_mapin(va, len);
273 return (read(fd, (void *)va, len));
274}
275
276static ssize_t
277sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
278{
279
280 mmu_ops->mmu_mapin(dest, len);
281 memcpy((void *)dest, src, len);
282 return (len);
283}
284
285static void
286sparc64_maphint(vm_offset_t va, size_t len)
287{
288 vm_paddr_t pa;
289 vm_offset_t mva;
290 size_t size;
291 int i, free_excess = 0;
292
293 if (!is_sun4v)
294 return;
295
296 if (tlb_store[va >> 22].te_pa != -1)
297 return;
298
299 /* round up to nearest 4MB page */
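	/*
	 * E.g., assuming PAGE_MASK_4M is PAGE_SIZE_4M - 1 as usual, a
	 * len of 5MB rounds up to 8MB (two 4MB pages) here.
	 */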
300 size = (len + PAGE_MASK_4M) & ~PAGE_MASK_4M;
301#if 0
302 pa = alloc_phys(PAGE_SIZE_256M, PAGE_SIZE_256M);
303
304 if (pa != -1)
305 free_excess = 1;
306 else
307#endif
308 pa = alloc_phys(size, PAGE_SIZE_256M);
309 if (pa == -1)
310 pa = alloc_phys(size, PAGE_SIZE_4M);
311 if (pa == -1)
312 panic("%s: out of memory", __func__);
313
314 for (i = 0; i < size; i += PAGE_SIZE_4M) {
315 mva = claim_virt(va + i, PAGE_SIZE_4M, 0);
316 if (mva != (va + i))
317 panic("%s: can't claim virtual page "
318 "(wanted %#lx, got %#lx)",
319 __func__, va, mva);
320
321 tlb_store[mva >> 22].te_pa = pa + i;
322 if (map_phys(-1, PAGE_SIZE_4M, mva, pa + i) != 0)
323 printf("%s: can't map physical page\n", __func__);
324 }
325 if (free_excess)
326 release_phys(pa, PAGE_SIZE_256M);
327}
328
329/*
330 * other MD functions
331 */
332static vm_offset_t
333claim_virt(vm_offset_t virt, size_t size, int align)
334{
335 vm_offset_t mva;
336
337 if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
338 return ((vm_offset_t)-1);
339 return (mva);
340}
341
342static vm_offset_t
343alloc_phys(size_t size, int align)
344{
345 cell_t phys_hi, phys_low;
346
347 if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
348 &phys_hi) == -1)
349 return ((vm_offset_t)-1);
350 return ((vm_offset_t)phys_hi << 32 | phys_low);
351}
352
353static int
354map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
355{
356
357 return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
358 (uint32_t)(phys >> 32), virt, size, mode));
359}
360
361static void
362release_phys(vm_offset_t phys, u_int size)
363{
364
365 (void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
366 (uint32_t)(phys >> 32), size);
367}
368
369static int
370__elfN(exec)(struct preloaded_file *fp)
371{
372 struct file_metadata *fmp;
373 vm_offset_t mdp;
374 Elf_Addr entry;
375 Elf_Ehdr *e;
376 int error;
377
378 if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
379 return (EFTYPE);
380 e = (Elf_Ehdr *)&fmp->md_data;
381
382 if ((error = md_load(fp->f_args, &mdp)) != 0)
383 return (error);
384
385 printf("jumping to kernel entry at %#lx.\n", e->e_entry);
386#if LOADER_DEBUG
386#ifdef LOADER_DEBUG
387 pmap_print_tlb_sun4u();
388#endif
389
390 entry = e->e_entry;
391
392 OF_release((void *)heapva, HEAPSZ);
393
394 ((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);
395
396 panic("%s: exec returned", __func__);
397}
398
399static inline u_long
400dtlb_get_data_sun4u(int slot)
401{
402
403 /*
404 * We read ASI_DTLB_DATA_ACCESS_REG twice in order to work
405 * around errata of USIII and beyond.
406 */
407 (void)ldxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG);
408 return (ldxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG));
409}
410
411static inline u_long
412itlb_get_data_sun4u(int slot)
413{
414
415 /*
416 * We read ASI_ITLB_DATA_ACCESS_REG twice in order to work
417 * around errata of USIII and beyond.
418 */
419 (void)ldxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG);
420 return (ldxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG));
421}
422
423static vm_offset_t
424dtlb_va_to_pa_sun4u(vm_offset_t va)
425{
426 u_long pstate, reg;
427 int i;
428
429 pstate = rdpr(pstate);
430 wrpr(pstate, pstate & ~PSTATE_IE, 0);
431 for (i = 0; i < dtlb_slot_max; i++) {
432 reg = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
433 if (TLB_TAR_VA(reg) != va)
434 continue;
435 reg = dtlb_get_data_sun4u(i);
436 wrpr(pstate, pstate, 0);
437 if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
438 return ((reg & TD_PA_CH_MASK) >> TD_PA_SHIFT);
439 return ((reg & TD_PA_SF_MASK) >> TD_PA_SHIFT);
440 }
441 wrpr(pstate, pstate, 0);
442 return (-1);
443}
444
445static vm_offset_t
446itlb_va_to_pa_sun4u(vm_offset_t va)
447{
448 u_long pstate, reg;
449 int i;
450
451 pstate = rdpr(pstate);
452 wrpr(pstate, pstate & ~PSTATE_IE, 0);
453 for (i = 0; i < itlb_slot_max; i++) {
454 reg = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
455 if (TLB_TAR_VA(reg) != va)
456 continue;
457 reg = itlb_get_data_sun4u(i);
458 wrpr(pstate, pstate, 0);
459 if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
460 return ((reg & TD_PA_CH_MASK) >> TD_PA_SHIFT);
461 return ((reg & TD_PA_SF_MASK) >> TD_PA_SHIFT);
462 }
463 wrpr(pstate, pstate, 0);
464 return (-1);
465}
466
467static void
468dtlb_enter_sun4u(u_long vpn, u_long data)
469{
470 u_long reg;
471
472 reg = rdpr(pstate);
473 wrpr(pstate, reg & ~PSTATE_IE, 0);
474 stxa(AA_DMMU_TAR, ASI_DMMU,
475 TLB_TAR_VA(vpn) | TLB_TAR_CTX(TLB_CTX_KERNEL));
476 stxa(0, ASI_DTLB_DATA_IN_REG, data);
477 membar(Sync);
478 wrpr(pstate, reg, 0);
479}
480
481static void
482itlb_enter_sun4u(u_long vpn, u_long data)
483{
484 u_long reg;
485 int i;
486
487 reg = rdpr(pstate);
488 wrpr(pstate, reg & ~PSTATE_IE, 0);
489
490 if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
491 /*
492 * Search an unused slot != 0 and explicitly enter the data
493 * and tag there in order to avoid Cheetah+ erratum 34.
494 */
495 for (i = 1; i < itlb_slot_max; i++) {
496 if ((itlb_get_data_sun4u(i) & TD_V) != 0)
497 continue;
498
499 stxa(AA_IMMU_TAR, ASI_IMMU,
500 TLB_TAR_VA(vpn) | TLB_TAR_CTX(TLB_CTX_KERNEL));
501 stxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, data);
502 flush(KERNBASE);
502 flush(PROMBASE);
503 break;
504 }
505 wrpr(pstate, reg, 0);
506 if (i == itlb_slot_max)
507 panic("%s: could not find an unused slot", __func__);
508 return;
509 }
510
511 stxa(AA_IMMU_TAR, ASI_IMMU,
512 TLB_TAR_VA(vpn) | TLB_TAR_CTX(TLB_CTX_KERNEL));
513 stxa(0, ASI_ITLB_DATA_IN_REG, data);
514 flush(KERNBASE);
514 flush(PROMBASE);
515 wrpr(pstate, reg, 0);
516}
517
518static void
519itlb_relocate_locked0_sun4u(void)
520{
521 u_long data, pstate, tag;
522 int i;
523
524 if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
525 return;
526
527 pstate = rdpr(pstate);
528 wrpr(pstate, pstate & ~PSTATE_IE, 0);
529
530 data = itlb_get_data_sun4u(0);
531 if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
532 wrpr(pstate, pstate, 0);
533 return;
534 }
535
536 /* Flush the mapping of slot 0. */
537 tag = ldxa(TLB_DAR_SLOT(0), ASI_ITLB_TAG_READ_REG);
538 stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
539 TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
540 flush(0); /* The USIII-family ignores the address. */
541
542 /*
543 * Search a replacement slot != 0 and enter the data and tag
544 * that formerly were in slot 0.
545 */
546 for (i = 1; i < itlb_slot_max; i++) {
547 if ((itlb_get_data_sun4u(i) & TD_V) != 0)
548 continue;
549
550 stxa(AA_IMMU_TAR, ASI_IMMU, tag);
551 stxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, data);
552 flush(0); /* The USIII-family ignores the address. */
553 break;
554 }
555 wrpr(pstate, pstate, 0);
556 if (i == itlb_slot_max)
557 panic("%s: could not find a replacement slot", __func__);
558}
559
560static int
561mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
562{
563 vm_offset_t pa, mva;
564 u_long data;
565
566 if (va + len > curkva)
567 curkva = va + len;
568
569 pa = (vm_offset_t)-1;
570 len += va & PAGE_MASK_4M;
571 va &= ~PAGE_MASK_4M;
572 while (len) {
573 if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
574 itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
575 /* Allocate a physical page, claim the virtual area. */
576 if (pa == (vm_offset_t)-1) {
577 pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
578 if (pa == (vm_offset_t)-1)
579 panic("%s: out of memory", __func__);
580 mva = claim_virt(va, PAGE_SIZE_4M, 0);
581 if (mva != va)
582 panic("%s: can't claim virtual page "
583 "(wanted %#lx, got %#lx)",
584 __func__, va, mva);
585 /*
586 * The mappings may have changed, be paranoid.
587 */
588 continue;
589 }
590 /*
591 * Actually, we can only allocate two pages less at
592 * most (depending on the kernel TSB size).
593 */
594 if (dtlb_slot >= dtlb_slot_max)
595 panic("%s: out of dtlb_slots", __func__);
596 if (itlb_slot >= itlb_slot_max)
597 panic("%s: out of itlb_slots", __func__);
598 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
599 TD_CV | TD_P | TD_W;
600 dtlb_store[dtlb_slot].te_pa = pa;
601 dtlb_store[dtlb_slot].te_va = va;
602 itlb_store[itlb_slot].te_pa = pa;
603 itlb_store[itlb_slot].te_va = va;
604 dtlb_slot++;
605 itlb_slot++;
606 dtlb_enter_sun4u(va, data);
607 itlb_enter_sun4u(va, data);
608 pa = (vm_offset_t)-1;
609 }
610 len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
611 va += PAGE_SIZE_4M;
612 }
613 if (pa != (vm_offset_t)-1)
614 release_phys(pa, PAGE_SIZE_4M);
615 return (0);
616}
617
618static int
619mmu_mapin_sun4v(vm_offset_t va, vm_size_t len)
620{
621 vm_offset_t pa, mva;
622
623 if (va + len > curkva)
624 curkva = va + len;
625
626 pa = (vm_offset_t)-1;
627 len += va & PAGE_MASK_4M;
628 va &= ~PAGE_MASK_4M;
629 while (len) {
630 if ((va >> 22) > SUN4V_TLB_SLOT_MAX)
631 panic("%s: trying to map more than 4GB", __func__);
632 if (tlb_store[va >> 22].te_pa == -1) {
633 /* Allocate a physical page, claim the virtual area */
634 if (pa == (vm_offset_t)-1) {
635 pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
636 if (pa == (vm_offset_t)-1)
637 panic("%s: out of memory", __func__);
638 mva = claim_virt(va, PAGE_SIZE_4M, 0);
639 if (mva != va)
640 panic("%s: can't claim virtual page "
641 "(wanted %#lx, got %#lx)",
642 __func__, va, mva);
643 }
644
645 tlb_store[va >> 22].te_pa = pa;
646 if (map_phys(-1, PAGE_SIZE_4M, va, pa) == -1)
647 printf("%s: can't map physical page\n",
648 __func__);
649 pa = (vm_offset_t)-1;
650 }
651 len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
652 va += PAGE_SIZE_4M;
653 }
654 if (pa != (vm_offset_t)-1)
655 release_phys(pa, PAGE_SIZE_4M);
656 return (0);
657}
658
659static vm_offset_t
660init_heap(void)
661{
662
663 /* There is no need for contiguous physical heap memory. */
664 heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
665 return (heapva);
666}
667
668static void
669tlb_init_sun4u(void)
670{
671 phandle_t child;
672 char buf[128];
673 u_int bootcpu;
674 u_int cpu;
675
676 cpu_impl = VER_IMPL(rdpr(ver));
677 bootcpu = UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG));
678 for (child = OF_child(root); child != 0; child = OF_peer(child)) {
679 if (OF_getprop(child, "device_type", buf, sizeof(buf)) <= 0)
680 continue;
681 if (strcmp(buf, "cpu") != 0)
682 continue;
683 if (OF_getprop(child, cpu_impl < CPU_IMPL_ULTRASPARCIII ?
684 "upa-portid" : "portid", &cpu, sizeof(cpu)) <= 0)
685 continue;
686 if (cpu == bootcpu)
687 break;
688 }
689 if (cpu != bootcpu)
690 panic("%s: no node for bootcpu?!?!", __func__);
691
692 if (OF_getprop(child, "#dtlb-entries", &dtlb_slot_max,
693 sizeof(dtlb_slot_max)) == -1 ||
694 OF_getprop(child, "#itlb-entries", &itlb_slot_max,
695 sizeof(itlb_slot_max)) == -1)
696 panic("%s: can't get TLB slot max.", __func__);
697
698 if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
699#ifdef LOADER_DEBUG
700 printf("pre fixup:\n");
701 pmap_print_tlb_sun4u();
702#endif
703
704 /*
705 * Relocate the locked entry in it16 slot 0 (if existent)
706 * as part of working around Cheetah+ erratum 34.
707 */
708 itlb_relocate_locked0_sun4u();
709
710#ifdef LOADER_DEBUG
711 printf("post fixup:\n");
712 pmap_print_tlb_sun4u();
713#endif
714 }
715
716 dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
717 itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
718 if (dtlb_store == NULL || itlb_store == NULL)
719 panic("%s: can't allocate TLB store", __func__);
720}
721
722static void
723tlb_init_sun4v(void)
724{
725
726 tlb_store = malloc(SUN4V_TLB_SLOT_MAX * sizeof(*tlb_store));
727 memset(tlb_store, 0xFF, SUN4V_TLB_SLOT_MAX * sizeof(*tlb_store));
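	/*
	 * Filling the array with 0xFF bytes makes every te_pa read back
	 * as -1, which sparc64_maphint() and mmu_mapin_sun4v() use as
	 * the "slot not yet mapped" marker.
	 */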
728}
729
730int
731main(int (*openfirm)(void *))
732{
733 char bootpath[64];
734 char compatible[32];
735 struct devsw **dp;
736
737 /*
738 * Tell the Open Firmware functions where they find the OFW gate.
739 */
740 OF_init(openfirm);
741
742 archsw.arch_getdev = ofw_getdev;
743 archsw.arch_copyin = sparc64_copyin;
744 archsw.arch_copyout = ofw_copyout;
745 archsw.arch_readin = sparc64_readin;
746 archsw.arch_autoload = sparc64_autoload;
747 archsw.arch_maphint = sparc64_maphint;
748
749 init_heap();
750 setheap((void *)heapva, (void *)(heapva + HEAPSZ));
751
752 /*
753 * Probe for a console.
754 */
755 cons_probe();
756
757 if ((root = OF_peer(0)) == -1)
758 panic("%s: can't get root phandle", __func__);
759 OF_getprop(root, "compatible", compatible, sizeof(compatible));
760 if (!strcmp(compatible, "sun4v")) {
761 printf("\nBooting with sun4v support.\n");
762 mmu_ops = &mmu_ops_sun4v;
763 is_sun4v = 1;
764 } else {
765 printf("\nBooting with sun4u support.\n");
766 mmu_ops = &mmu_ops_sun4u;
767 }
768
769 mmu_ops->tlb_init();
770
771 /*
772 * Initialize devices.
773 */
774 for (dp = devsw; *dp != 0; dp++) {
775 if ((*dp)->dv_init != 0)
776 (*dp)->dv_init();
777 }
778
779 /*
780 * Set up the current device.
781 */
782 OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));
783
784 /*
785 * Sun compatible bootable CD-ROMs have a disk label placed
786 * before the cd9660 data, with the actual filesystem being
787 * in the first partition, while the other partitions contain
788 * pseudo disk labels with embedded boot blocks for different
789 * architectures, which may be followed by UFS filesystems.
790 * The firmware will set the boot path to the partition it
791 * boots from ('f' in the sun4u case), but we want the kernel
792 * to be loaded from the cd9660 fs ('a'), so the boot path
793 * needs to be altered.
794 */
795 if (bootpath[strlen(bootpath) - 2] == ':' &&
796 bootpath[strlen(bootpath) - 1] == 'f') {
797 bootpath[strlen(bootpath) - 1] = 'a';
798 printf("Boot path set to %s\n", bootpath);
799 }
800
801 env_setenv("currdev", EV_VOLATILE, bootpath,
802 ofw_setcurrdev, env_nounset);
803 env_setenv("loaddev", EV_VOLATILE, bootpath,
804 env_noset, env_nounset);
805
806 printf("\n");
807 printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
808 printf("(%s, %s)\n", bootprog_maker, bootprog_date);
809 printf("bootpath=\"%s\"\n", bootpath);
810
811 /* Give control to the machine independent loader code. */
812 interact();
813 return (1);
814}
815
816COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);
817
818static int
819command_reboot(int argc, char *argv[])
820{
821 int i;
822
823 for (i = 0; devsw[i] != NULL; ++i)
824 if (devsw[i]->dv_cleanup != NULL)
825 (devsw[i]->dv_cleanup)();
826
827 printf("Rebooting...\n");
828 OF_exit();
829}
830
831/* provide this for panic, as it's not in the startup code */
832void
833exit(int code)
834{
835
836 OF_exit();
837}
838
839#ifdef LOADER_DEBUG
840static const char *page_sizes[] = {
840static const char *const page_sizes[] = {
841 " 8k", " 64k", "512k", " 4m"
842};
843
844static void
845pmap_print_tte_sun4u(tte_t tag, tte_t tte)
846{
847
848 printf("%s %s ",
849 page_sizes[(tte & TD_SIZE_MASK) >> TD_SIZE_SHIFT],
850 tag & TD_G ? "G" : " ");
851 printf(tte & TD_W ? "W " : " ");
852 printf(tte & TD_P ? "\e[33mP\e[0m " : " ");
853 printf(tte & TD_E ? "E " : " ");
854 printf(tte & TD_CV ? "CV " : " ");
855 printf(tte & TD_CP ? "CP " : " ");
856 printf(tte & TD_L ? "\e[32mL\e[0m " : " ");
857 printf(tte & TD_IE ? "IE " : " ");
858 printf(tte & TD_NFO ? "NFO " : " ");
859 printf("pa=0x%lx va=0x%lx ctx=%ld\n",
860 TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
861}
862
863static void
864pmap_print_tlb_sun4u(void)
865{
866 tte_t tag, tte;
867 u_long pstate;
868 int i;
869
870 pstate = rdpr(pstate);
871 for (i = 0; i < itlb_slot_max; i++) {
872 wrpr(pstate, pstate & ~PSTATE_IE, 0);
873 tte = itlb_get_data_sun4u(i);
874 wrpr(pstate, pstate, 0);
875 if (!(tte & TD_V))
876 continue;
877 tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
878 printf("iTLB-%2u: ", i);
879 pmap_print_tte_sun4u(tag, tte);
880 }
881 for (i = 0; i < dtlb_slot_max; i++) {
882 wrpr(pstate, pstate & ~PSTATE_IE, 0);
883 tte = dtlb_get_data_sun4u(i);
884 wrpr(pstate, pstate, 0);
885 if (!(tte & TD_V))
886 continue;
887 tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
888 printf("dTLB-%2u: ", i);
889 pmap_print_tte_sun4u(tag, tte);
890 }
891}
892#endif