/*-
 * Copyright (c) 2006 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <stand.h>
#include <string.h>

#include <sys/param.h>
#include <sys/linker.h>
#include <machine/elf.h>
#include <machine/ia64_cpu.h>
#include <machine/pte.h>

#include <efi.h>
#include <efilib.h>

#include "libia64.h"

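/*
 * Index of the next instruction and data translation register to use
 * when wiring translations for the kernel.
 */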
static u_int itr_idx = 0;
static u_int dtr_idx = 0;

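/*
 * Location and size of the kernel's text and data segments, as recorded
 * by ia64_loadseg() while the kernel image is being loaded.
 */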
static vm_offset_t ia64_text_start;
static size_t ia64_text_size;

static vm_offset_t ia64_data_start;
static size_t ia64_data_size;

static int elf64_exec(struct preloaded_file *amp);
static int elf64_obj_exec(struct preloaded_file *amp);

static struct file_format ia64_elf = {
	elf64_loadfile,
	elf64_exec
};
static struct file_format ia64_elf_obj = {
	elf64_obj_loadfile,
	elf64_obj_exec
};

struct file_format *file_formats[] = {
	&ia64_elf,
	&ia64_elf_obj,
	NULL
};

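/*
 * Compute a page shift for mapping a region: the largest power-of-two
 * page size that does not exceed 'sz' and to which 'ofs' is aligned.
 */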
static u_int
sz2shft(vm_offset_t ofs, vm_size_t sz)
{
	vm_size_t s;
	u_int shft;

	shft = 12;	/* Start with 4K */
	s = 1 << shft;
	while (s <= sz) {
		shft++;
		s <<= 1;
	}
	do {
		shft--;
		s >>= 1;
	} while (ofs & (s - 1));

	return (shft);
}

/*
 * Entered with psr.ic and psr.i both zero.
 */
static void
enter_kernel(uint64_t start, struct bootinfo *bi)
{

	__asm __volatile("srlz.i;;");
	__asm __volatile("mov cr.ipsr=%0"
			 :: "r"(IA64_PSR_IC
				| IA64_PSR_DT
				| IA64_PSR_RT
				| IA64_PSR_IT
				| IA64_PSR_BN));
	__asm __volatile("mov cr.iip=%0" :: "r"(start));
	__asm __volatile("mov cr.ifs=r0;;");
	__asm __volatile("mov ar.rsc=0;; flushrs;;");
	__asm __volatile("mov r8=%0" :: "r" (bi));
	__asm __volatile("rfi;;");

	/* NOTREACHED */
}

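/*
 * Wire a translation for the given virtual/physical address pair, using
 * a data translation register and, for executable access rights, a
 * matching instruction translation register.  The requested page shift
 * is clamped to an insertable page size; the page shift actually used
 * is returned so the caller knows how much was mapped.
 */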
static u_int
mmu_wire(vm_offset_t va, vm_paddr_t pa, u_int pgshft, u_int acc)
{
	pt_entry_t pte;

	/* Round up to the smallest possible page size. */
	if (pgshft < 12)
		pgshft = 12;
	/* Truncate to the largest possible page size (256MB). */
	if (pgshft > 28)
		pgshft = 28;
	/* Round down to a valid (mappable) page size. */
	if (pgshft > 14 && (pgshft & 1) != 0)
		pgshft--;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | (acc & PTE_AR_MASK) | (pa & PTE_PPN_MASK);

	__asm __volatile("mov cr.ifa=%0" :: "r"(va));
	__asm __volatile("mov cr.itir=%0" :: "r"(pgshft << 2));
	__asm __volatile("srlz.d;;");

	__asm __volatile("ptr.d %0,%1" :: "r"(va), "r"(pgshft << 2));
	__asm __volatile("srlz.d;;");
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(dtr_idx), "r"(pte));
	__asm __volatile("srlz.d;;");
	dtr_idx++;

	if (acc == PTE_AR_RWX || acc == PTE_AR_RX) {
		__asm __volatile("ptr.i %0,%1;;" :: "r"(va), "r"(pgshft << 2));
		__asm __volatile("srlz.i;;");
		__asm __volatile("itr.i itr[%0]=%1;;" :: "r"(itr_idx), "r"(pte));
		__asm __volatile("srlz.i;;");
		itr_idx++;
	}

	return (pgshft);
}

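/*
 * Set up the MMU for a legacy (pre-PBVM) kernel: the kernel is covered
 * by a single 256MB wired translation at its load address.
 */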
static void
mmu_setup_legacy(uint64_t entry)
{

	/*
	 * Region 6 is direct mapped UC and region 7 is direct mapped
	 * WC. The details of this are controlled by the Alt {I,D}TLB
	 * handlers. Here we just make sure that they have the largest
	 * possible page size to minimise TLB usage.
	 */
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
	__asm __volatile("srlz.i;;");

	mmu_wire(entry, IA64_RR_MASK(entry), 28, PTE_AR_RWX);
}

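/*
 * Set up the MMU for a PBVM kernel: wire the PBVM page table and as much
 * of the kernel's text and data segments as a single translation each
 * allows, and record the mapped sizes and translation register usage in
 * the bootinfo for the kernel to pick up.
 */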
static void
mmu_setup_paged(struct bootinfo *bi)
{
	void *pa;
	size_t sz;
	u_int shft;

	ia64_set_rr(IA64_RR_BASE(IA64_PBVM_RR),
	    (IA64_PBVM_RR << 8) | (IA64_PBVM_PAGE_SHIFT << 2));
	__asm __volatile("srlz.i;;");

	/* Wire the PBVM page table. */
	mmu_wire(IA64_PBVM_PGTBL, (uintptr_t)ia64_pgtbl,
	    sz2shft(IA64_PBVM_PGTBL, ia64_pgtblsz), PTE_AR_RW);

	/* Wire as much of the text segment as we can. */
	sz = ia64_text_size;	/* XXX */
	pa = ia64_va2pa(ia64_text_start, &ia64_text_size);
	ia64_text_size = sz;	/* XXX */
	shft = sz2shft(ia64_text_start, ia64_text_size);
	shft = mmu_wire(ia64_text_start, (uintptr_t)pa, shft, PTE_AR_RWX);
	ia64_copyin(&shft, (uintptr_t)&bi->bi_text_mapped, 4);

	/* Likewise, wire as much of the data segment as we can. */
	sz = ia64_data_size;	/* XXX */
	pa = ia64_va2pa(ia64_data_start, &ia64_data_size);
	ia64_data_size = sz;	/* XXX */
	shft = sz2shft(ia64_data_start, ia64_data_size);
	shft = mmu_wire(ia64_data_start, (uintptr_t)pa, shft, PTE_AR_RW);
	ia64_copyin(&shft, (uintptr_t)&bi->bi_data_mapped, 4);

	/* Update the bootinfo with the number of TRs used. */
	ia64_copyin(&itr_idx, (uintptr_t)&bi->bi_itr_used, 4);
	ia64_copyin(&dtr_idx, (uintptr_t)&bi->bi_dtr_used, 4);
}

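/*
 * Transfer control to a loaded ELF kernel: build the bootinfo, let the
 * platform code make its final preparations, disable interrupts and
 * interruption collection, set up the wired translations the kernel
 * expects and branch to its entry point.
 */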
static int
elf64_exec(struct preloaded_file *fp)
{
	struct bootinfo *bi;
	struct file_metadata *md;
	Elf_Ehdr *hdr;
	int error;

	md = file_findmetadata(fp, MODINFOMD_ELFHDR);
	if (md == NULL)
		return (EINVAL);

	error = ia64_bootinfo(fp, &bi);
	if (error)
		return (error);

	hdr = (Elf_Ehdr *)&(md->md_data);
	printf("Entering %s at 0x%lx...\n", fp->f_name, hdr->e_entry);

	error = ia64_platform_enter(fp->f_name);
	if (error)
		return (error);

	__asm __volatile("rsm psr.ic|psr.i;;");
	__asm __volatile("srlz.i;;");

	if (IS_LEGACY_KERNEL())
		mmu_setup_legacy(hdr->e_entry);
	else
		mmu_setup_paged(bi);

	enter_kernel(hdr->e_entry, bi);
	/* NOTREACHED */
	return (EDOOFUS);
}

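/* Booting a relocatable object (ELF .o) kernel is not supported. */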
static int
elf64_obj_exec(struct preloaded_file *fp)
{

	printf("%s called for preloaded file %p (=%s):\n", __func__, fp,
	    fp->f_name);
	return (ENOSYS);
}

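/*
 * Called for each program header while an executable (ET_EXEC) kernel is
 * being loaded.  Record where the text and data segments end up and keep
 * the instruction cache coherent with the freshly loaded text.
 */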
void
ia64_loadseg(Elf_Ehdr *eh, Elf_Phdr *ph, uint64_t delta)
{

	if (eh->e_type != ET_EXEC)
		return;

	if (ph->p_flags & PF_X) {
		ia64_text_start = ph->p_vaddr + delta;
		ia64_text_size = ph->p_memsz;

		ia64_sync_icache(ia64_text_start, ia64_text_size);
	} else {
		ia64_data_start = ph->p_vaddr + delta;
		ia64_data_size = ph->p_memsz;
	}
}