/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/elf_machdep.c 331722 2018-03-29 02:50:57Z eadler $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/linker.h>
#include <sys/sysent.h>
#include <sys/imgact_elf.h>
#include <sys/proc.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>

#include <machine/elf.h>
#include <machine/md_var.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

static boolean_t elf32_arm_abi_supported(struct image_params *);

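/*
 * CPU capability bits exported to usermode through the AT_HWCAP and
 * AT_HWCAP2 ELF auxiliary vector entries (see the sv_hwcap/sv_hwcap2
 * pointers in the sysentvec below).
 */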
u_long elf_hwcap;
u_long elf_hwcap2;

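/*
 * Describes how to run a native FreeBSD ELF32 image: system call table,
 * signal delivery, stack layout and user address-space limits.
 */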
struct sysentvec elf32_freebsd_sysvec = {
	.sv_size	= SYS_MAXSYSCALL,
	.sv_table	= sysent,
	.sv_mask	= 0,
	.sv_errsize	= 0,
	.sv_errtbl	= NULL,
	.sv_transtrap	= NULL,
	.sv_fixup	= __elfN(freebsd_fixup),
	.sv_sendsig	= sendsig,
	.sv_sigcode	= sigcode,
	.sv_szsigcode	= &szsigcode,
	.sv_name	= "FreeBSD ELF32",
	.sv_coredump	= __elfN(coredump),
	.sv_imgact_try	= NULL,
	.sv_minsigstksz	= MINSIGSTKSZ,
	.sv_pagesize	= PAGE_SIZE,
	.sv_minuser	= VM_MIN_ADDRESS,
	.sv_maxuser	= VM_MAXUSER_ADDRESS,
	.sv_usrstack	= USRSTACK,
	.sv_psstrings	= PS_STRINGS,
	.sv_stackprot	= VM_PROT_ALL,
	.sv_copyout_strings = exec_copyout_strings,
	.sv_setregs	= exec_setregs,
	.sv_fixlimit	= NULL,
	.sv_maxssiz	= NULL,
	.sv_flags	=
#if __ARM_ARCH >= 6
			  SV_SHP | SV_TIMEKEEP |
#endif
			  SV_ABI_FREEBSD | SV_ILP32 | SV_HWCAP,
	.sv_set_syscall_retval = cpu_set_syscall_retval,
	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
	.sv_syscallnames = syscallnames,
	.sv_shared_page_base = SHAREDPAGE,
	.sv_shared_page_len = PAGE_SIZE,
	.sv_schedtail	= NULL,
	.sv_thread_detach = NULL,
	.sv_trap	= NULL,
	.sv_hwcap	= &elf_hwcap,
	.sv_hwcap2	= &elf_hwcap2,
};
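/*
 * Hook the sysentvec into boot-time initialization; on ARMv6 and later this
 * sets up the shared page used for the signal trampoline and timekeeping.
 */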
INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec);

static Elf32_Brandinfo freebsd_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_ARM,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf32_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &elf32_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
	.header_supported= elf32_arm_abi_supported,
};

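/*
 * Register the FreeBSD brand with the ELF image activator at boot.
 */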
SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST,
	(sysinit_cfunc_t) elf32_insert_brand_entry,
	&freebsd_brand_info);

static boolean_t
elf32_arm_abi_supported(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;

	/*
	 * When configured for EABI, FreeBSD supports EABI versions 4 and 5.
	 */
	if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) {
		if (bootverbose)
			uprintf("Attempting to execute non-EABI binary "
			    "(rev %d) image %s\n",
			    EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname);
		return (FALSE);
	}
	return (TRUE);
}

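/*
 * Append machine-dependent notes to a core dump.  On ARM this emits the
 * thread's VFP register state as an NT_ARM_VFP note; when dst is NULL only
 * the size of the note is computed so the caller can reserve space for it.
 */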
void
elf32_dump_thread(struct thread *td, void *dst, size_t *off)
{
#ifdef VFP
	mcontext_vfp_t vfp;

	if (dst != NULL) {
		get_vfpcontext(td, &vfp);
		*off = elf32_populate_note(NT_ARM_VFP, &vfp, dst, sizeof(vfp),
		    NULL);
	} else
		*off = elf32_populate_note(NT_ARM_VFP, NULL, NULL, sizeof(vfp),
		    NULL);
#endif
}

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(Elf_Addr *where)
{
	Elf_Addr res;

	if (RELOC_ALIGNED_P(where))
		return (*where);
	memcpy(&res, where, sizeof(res));
	return (res);
}

static __inline void
store_ptr(Elf_Addr *where, Elf_Addr val)
{
	if (RELOC_ALIGNED_P(where))
		*where = val;
	else
		memcpy(where, &val, sizeof(val));
}
#undef RELOC_ALIGNED_P

/* Process one ELF relocation, of either the Rel or Rela flavor. */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf_Addr *where;
	Elf_Addr addr;
	Elf_Addr addend;
	Elf_Word rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;
	int error;

	switch (type) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)data;
		where = (Elf_Addr *) (relocbase + rel->r_offset);
		addend = load_ptr(where);
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("unknown reloc type %d\n", type);
	}

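	/*
	 * The local pass applies only R_ARM_RELATIVE relocations: they are
	 * resolved against the load base and need no symbol lookup.
	 */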
	if (local) {
		if (rtype == R_ARM_RELATIVE) {	/* A + B */
			addr = elf_relocaddr(lf, relocbase + addend);
			if (load_ptr(where) != addr)
				store_ptr(where, addr);
		}
		return (0);
	}

	switch (rtype) {

		case R_ARM_NONE:	/* none */
			break;

		case R_ARM_ABS32:
			error = lookup(lf, symidx, 1, &addr);
			if (error != 0)
				return (-1);
			store_ptr(where, addr + load_ptr(where));
			break;

		case R_ARM_COPY:	/* none */
			/*
			 * There shouldn't be copy relocations in kernel
			 * objects.
			 */
			printf("kldload: unexpected R_COPY relocation\n");
			return (-1);
			break;

		case R_ARM_JUMP_SLOT:
			error = lookup(lf, symidx, 1, &addr);
			if (error == 0) {
				store_ptr(where, addr);
				return (0);
			}
			return (-1);
		case R_ARM_RELATIVE:
			break;

		default:
			printf("kldload: unexpected relocation type %d\n",
			       rtype);
			return (-1);
	}
	return (0);
}

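/*
 * Entry points used by the kernel linker: elf_reloc() resolves relocations
 * that may reference external symbols, while elf_reloc_local() performs the
 * earlier pass that handles only locally resolvable (R_ARM_RELATIVE)
 * relocations.
 */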
int
elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
    elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
}

int
elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
}

int
elf_cpu_load_file(linker_file_t lf)
{

	/*
	 * The pmap code does not do an icache sync upon establishing executable
	 * mappings in the kernel pmap.  It's an optimization based on the fact
	 * that kernel memory allocations always have EXECUTABLE protection even
	 * when the memory isn't going to hold executable code.  The only time
	 * kernel memory holding instructions does need a sync is after loading
	 * a kernel module, and that's when this function gets called.
	 *
	 * This syncs data and instruction caches after loading a module.  We
	 * don't worry about the kernel itself (lf->id is 1) as locore.S did
	 * that on entry.  Even if data cache maintenance was done by IO code,
	 * the relocation fixup process creates dirty cache entries that we must
	 * write back before doing icache sync. The instruction cache sync also
	 * invalidates the branch predictor cache on platforms that have one.
	 */
	if (lf->id == 1)
		return (0);
#if __ARM_ARCH >= 6
	dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
	icache_inv_all();
#else
	cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
#endif
	return (0);
}

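/*
 * No machine-dependent cleanup is required when a module is unloaded.
 */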
int
elf_cpu_unload_file(linker_file_t lf __unused)
{

	return (0);
}