/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/elf_machdep.c 295207 2016-02-03 13:47:50Z mmel $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/linker.h>
#include <sys/sysent.h>
#include <sys/imgact_elf.h>
#include <sys/proc.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>

#include <machine/acle-compat.h>
#include <machine/elf.h>
#include <machine/md_var.h>

static boolean_t elf32_arm_abi_supported(struct image_params *);

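/*
 * System entry vector for native 32-bit ARM FreeBSD ELF binaries:
 * syscall dispatch table, signal delivery, address-space layout, and
 * argument/string copyout.  On ARMv6 and later the shared page and
 * userspace timekeeping are enabled via SV_SHP and SV_TIMEKEEP.
 */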
struct sysentvec elf32_freebsd_sysvec = {
	.sv_size	= SYS_MAXSYSCALL,
	.sv_table	= sysent,
	.sv_mask	= 0,
	.sv_errsize	= 0,
	.sv_errtbl	= NULL,
	.sv_transtrap	= NULL,
	.sv_fixup	= __elfN(freebsd_fixup),
	.sv_sendsig	= sendsig,
	.sv_sigcode	= sigcode,
	.sv_szsigcode	= &szsigcode,
	.sv_name	= "FreeBSD ELF32",
	.sv_coredump	= __elfN(coredump),
	.sv_imgact_try	= NULL,
	.sv_minsigstksz	= MINSIGSTKSZ,
	.sv_pagesize	= PAGE_SIZE,
	.sv_minuser	= VM_MIN_ADDRESS,
	.sv_maxuser	= VM_MAXUSER_ADDRESS,
	.sv_usrstack	= USRSTACK,
	.sv_psstrings	= PS_STRINGS,
	.sv_stackprot	= VM_PROT_ALL,
	.sv_copyout_strings = exec_copyout_strings,
	.sv_setregs	= exec_setregs,
	.sv_fixlimit	= NULL,
	.sv_maxssiz	= NULL,
	.sv_flags	=
#if __ARM_ARCH >= 6
			  SV_SHP | SV_TIMEKEEP |
#endif
			  SV_ABI_FREEBSD | SV_ILP32,
	.sv_set_syscall_retval = cpu_set_syscall_retval,
	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
	.sv_syscallnames = syscallnames,
	.sv_shared_page_base = SHAREDPAGE,
	.sv_shared_page_len = PAGE_SIZE,
	.sv_schedtail	= NULL,
	.sv_thread_detach = NULL,
	.sv_trap	= NULL,
};
INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec);

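/*
 * ELF brand describing FreeBSD/ARM executables (ELFOSABI_FREEBSD, EM_ARM).
 * The SYSINIT below registers it with the image activator so that matching
 * binaries run with elf32_freebsd_sysvec and the default runtime linker,
 * once elf32_arm_abi_supported() has accepted the binary's EABI version.
 */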
static Elf32_Brandinfo freebsd_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_ARM,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf32_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &elf32_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
	.header_supported = elf32_arm_abi_supported,
};

SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST,
	(sysinit_cfunc_t) elf32_insert_brand_entry,
	&freebsd_brand_info);

static boolean_t
elf32_arm_abi_supported(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;

	/*
	 * When configured for EABI, FreeBSD supports EABI versions 4 and 5.
	 */
	if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) {
		if (bootverbose)
			uprintf("Attempting to execute non-EABI binary (rev %d) image %s\n",
			    EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname);
		return (FALSE);
	}
	return (TRUE);
}

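/*
 * No machine-dependent ELF notes are emitted for ARM threads in core
 * dumps, so this is a no-op.
 */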
void
elf32_dump_thread(struct thread *td __unused, void *dst __unused,
    size_t *off __unused)
{
}

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(Elf_Addr *where)
{
	Elf_Addr res;

	if (RELOC_ALIGNED_P(where))
		return (*where);
	memcpy(&res, where, sizeof(res));
	return (res);
}

static __inline void
store_ptr(Elf_Addr *where, Elf_Addr val)
{
	if (RELOC_ALIGNED_P(where))
		*where = val;
	else
		memcpy(where, &val, sizeof(val));
}
#undef RELOC_ALIGNED_P

/* Process one elf relocation with addend. */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf_Addr *where;
	Elf_Addr addr;
	Elf_Addr addend;
	Elf_Word rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;
	int error;

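	/*
	 * Extract the relocation fields.  For REL entries the addend is
	 * the value already stored at the target location; for RELA
	 * entries it is carried in the relocation record itself.
	 */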
	switch (type) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)data;
		where = (Elf_Addr *) (relocbase + rel->r_offset);
		addend = load_ptr(where);
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("unknown reloc type %d\n", type);
	}

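	/*
	 * Local (pre-linking) passes only apply R_ARM_RELATIVE fixups,
	 * which do not require symbol lookup.
	 */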
	if (local) {
		if (rtype == R_ARM_RELATIVE) {	/* A + B */
			addr = elf_relocaddr(lf, relocbase + addend);
			if (load_ptr(where) != addr)
				store_ptr(where, addr);
		}
		return (0);
	}

	switch (rtype) {

		case R_ARM_NONE:	/* none */
			break;

		case R_ARM_ABS32:
			error = lookup(lf, symidx, 1, &addr);
			if (error != 0)
				return (-1);
			store_ptr(where, addr + load_ptr(where));
			break;

		case R_ARM_COPY:	/* none */
			/*
			 * There shouldn't be copy relocations in kernel
			 * objects.
			 */
			printf("kldload: unexpected R_COPY relocation\n");
			return (-1);

		case R_ARM_JUMP_SLOT:
			error = lookup(lf, symidx, 1, &addr);
			if (error == 0) {
				store_ptr(where, addr);
				return (0);
			}
			return (-1);

		case R_ARM_RELATIVE:
			break;

		default:
			printf("kldload: unexpected relocation type %d\n",
			    rtype);
			return (-1);
	}
	return (0);
}

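/*
 * Entry points used by the kernel linker: elf_reloc() processes
 * relocations that may reference external symbols, while
 * elf_reloc_local() handles only the locally resolvable ones.
 */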
int
elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
    elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
}

int
elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
}

int
elf_cpu_load_file(linker_file_t lf)
{

	/*
	 * The pmap code does not do an icache sync upon establishing executable
	 * mappings in the kernel pmap.  It's an optimization based on the fact
	 * that kernel memory allocations always have EXECUTABLE protection even
	 * when the memory isn't going to hold executable code.  The only time
	 * kernel memory holding instructions does need a sync is after loading
	 * a kernel module, and that's when this function gets called.
	 *
	 * This syncs data and instruction caches after loading a module.  We
	 * don't worry about the kernel itself (lf->id is 1) as locore.S did
	 * that on entry.  Even if data cache maintenance was done by IO code,
	 * the relocation fixup process creates dirty cache entries that we must
	 * write back before doing icache sync.  The instruction cache sync also
	 * invalidates the branch predictor cache on platforms that have one.
	 */
	if (lf->id == 1)
		return (0);
#if __ARM_ARCH >= 6
	dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
	icache_inv_all();
#else
	cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
#endif
	return (0);
}

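/* Nothing machine-dependent to clean up when a module is unloaded. */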
int
elf_cpu_unload_file(linker_file_t lf __unused)
{

	return (0);
}