/* elf_machdep.c revision 325810 */
1/*-
2 * Copyright 1996-1998 John D. Polstra.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include <sys/cdefs.h>
27__FBSDID("$FreeBSD: stable/11/sys/arm/arm/elf_machdep.c 325810 2017-11-14 16:03:07Z jhb $");
28
29#include <sys/param.h>
30#include <sys/kernel.h>
31#include <sys/systm.h>
32#include <sys/exec.h>
33#include <sys/imgact.h>
34#include <sys/linker.h>
35#include <sys/sysent.h>
36#include <sys/imgact_elf.h>
37#include <sys/proc.h>
38#include <sys/syscall.h>
39#include <sys/signalvar.h>
40#include <sys/vnode.h>
41
42#include <vm/vm.h>
43#include <vm/pmap.h>
44#include <vm/vm_param.h>
45
46#include <machine/elf.h>
47#include <machine/md_var.h>
48
/* Checks an ELF header's ARM EABI version before accepting the image. */
static boolean_t elf32_arm_abi_supported(struct image_params *);

/*
 * CPU feature bits exposed to userland; wired into elf32_freebsd_sysvec
 * below via sv_hwcap/sv_hwcap2 (the SV_HWCAP sysvec flag enables them).
 */
u_long elf_hwcap;
u_long elf_hwcap2;
53
/*
 * System call vector for native FreeBSD ELF32 binaries on ARM.
 * Registered at boot by the INIT_SYSENTVEC below and referenced by
 * freebsd_brand_info so the image activator can bind executables to it.
 */
struct sysentvec elf32_freebsd_sysvec = {
	.sv_size	= SYS_MAXSYSCALL,
	.sv_table	= sysent,
	.sv_mask	= 0,
	.sv_errsize	= 0,
	.sv_errtbl	= NULL,
	.sv_transtrap	= NULL,
	.sv_fixup	= __elfN(freebsd_fixup),
	.sv_sendsig	= sendsig,
	.sv_sigcode	= sigcode,
	.sv_szsigcode	= &szsigcode,
	.sv_name	= "FreeBSD ELF32",
	.sv_coredump	= __elfN(coredump),
	.sv_imgact_try	= NULL,
	.sv_minsigstksz	= MINSIGSTKSZ,
	.sv_pagesize	= PAGE_SIZE,
	.sv_minuser	= VM_MIN_ADDRESS,
	.sv_maxuser	= VM_MAXUSER_ADDRESS,
	.sv_usrstack	= USRSTACK,
	.sv_psstrings	= PS_STRINGS,
	.sv_stackprot	= VM_PROT_ALL,
	.sv_copyout_strings = exec_copyout_strings,
	.sv_setregs	= exec_setregs,
	.sv_fixlimit	= NULL,
	.sv_maxssiz	= NULL,
	/* Shared page and timekeeping page only exist on ARMv6+. */
	.sv_flags	=
#if __ARM_ARCH >= 6
			  SV_SHP | SV_TIMEKEEP |
#endif
			  SV_ABI_FREEBSD | SV_ILP32 | SV_HWCAP,
	.sv_set_syscall_retval = cpu_set_syscall_retval,
	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
	.sv_syscallnames = syscallnames,
	.sv_shared_page_base = SHAREDPAGE,
	.sv_shared_page_len = PAGE_SIZE,
	.sv_schedtail	= NULL,
	.sv_thread_detach = NULL,
	.sv_trap	= NULL,
	/* AT_HWCAP/AT_HWCAP2 auxargs come from these globals. */
	.sv_hwcap	= &elf_hwcap,
	.sv_hwcap2	= &elf_hwcap2,
};
INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec);
96
/*
 * ELF brand describing native FreeBSD/ARM executables: matched by OSABI
 * (or the FreeBSD brand note) and routed to elf32_freebsd_sysvec.
 * elf32_arm_abi_supported() additionally vets the EABI version.
 */
static Elf32_Brandinfo freebsd_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_ARM,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf32_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &elf32_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
	.header_supported= elf32_arm_abi_supported,
};

/* Register the brand with the generic ELF image activator at boot. */
SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST,
	(sysinit_cfunc_t) elf32_insert_brand_entry,
	&freebsd_brand_info);
113
114static boolean_t
115elf32_arm_abi_supported(struct image_params *imgp)
116{
117	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
118
119	/*
120	 * When configured for EABI, FreeBSD supports EABI vesions 4 and 5.
121	 */
122	if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) {
123		if (bootverbose)
124			uprintf("Attempting to execute non EABI binary (rev %d) image %s",
125			    EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname);
126		return (FALSE);
127	}
128	return (TRUE);
129}
130
/*
 * Machine-dependent per-thread core dump notes: intentionally empty on
 * ARM — no extra note data is emitted beyond what the MI code writes.
 */
void
elf32_dump_thread(struct thread *td __unused, void *dst __unused,
    size_t *off __unused)
{
}
136
137/*
138 * It is possible for the compiler to emit relocations for unaligned data.
139 * We handle this situation with these inlines.
140 */
141#define	RELOC_ALIGNED_P(x) \
142	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
143
144static __inline Elf_Addr
145load_ptr(Elf_Addr *where)
146{
147	Elf_Addr res;
148
149	if (RELOC_ALIGNED_P(where))
150		return *where;
151	memcpy(&res, where, sizeof(res));
152	return (res);
153}
154
155static __inline void
156store_ptr(Elf_Addr *where, Elf_Addr val)
157{
158	if (RELOC_ALIGNED_P(where))
159		*where = val;
160	else
161		memcpy(where, &val, sizeof(val));
162}
163#undef RELOC_ALIGNED_P
164
165
166/* Process one elf relocation with addend. */
167static int
168elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
169    int type, int local, elf_lookup_fn lookup)
170{
171	Elf_Addr *where;
172	Elf_Addr addr;
173	Elf_Addr addend;
174	Elf_Word rtype, symidx;
175	const Elf_Rel *rel;
176	const Elf_Rela *rela;
177	int error;
178
179	switch (type) {
180	case ELF_RELOC_REL:
181		rel = (const Elf_Rel *)data;
182		where = (Elf_Addr *) (relocbase + rel->r_offset);
183		addend = load_ptr(where);
184		rtype = ELF_R_TYPE(rel->r_info);
185		symidx = ELF_R_SYM(rel->r_info);
186		break;
187	case ELF_RELOC_RELA:
188		rela = (const Elf_Rela *)data;
189		where = (Elf_Addr *) (relocbase + rela->r_offset);
190		addend = rela->r_addend;
191		rtype = ELF_R_TYPE(rela->r_info);
192		symidx = ELF_R_SYM(rela->r_info);
193		break;
194	default:
195		panic("unknown reloc type %d\n", type);
196	}
197
198	if (local) {
199		if (rtype == R_ARM_RELATIVE) {	/* A + B */
200			addr = elf_relocaddr(lf, relocbase + addend);
201			if (load_ptr(where) != addr)
202				store_ptr(where, addr);
203		}
204		return (0);
205	}
206
207	switch (rtype) {
208
209		case R_ARM_NONE:	/* none */
210			break;
211
212		case R_ARM_ABS32:
213			error = lookup(lf, symidx, 1, &addr);
214			if (error != 0)
215				return -1;
216			store_ptr(where, addr + load_ptr(where));
217			break;
218
219		case R_ARM_COPY:	/* none */
220			/*
221			 * There shouldn't be copy relocations in kernel
222			 * objects.
223			 */
224			printf("kldload: unexpected R_COPY relocation\n");
225			return -1;
226			break;
227
228		case R_ARM_JUMP_SLOT:
229			error = lookup(lf, symidx, 1, &addr);
230			if (error == 0) {
231				store_ptr(where, addr);
232				return (0);
233			}
234			return (-1);
235		case R_ARM_RELATIVE:
236			break;
237
238		default:
239			printf("kldload: unexpected relocation type %d\n",
240			       rtype);
241			return -1;
242	}
243	return(0);
244}
245
246int
247elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
248    elf_lookup_fn lookup)
249{
250
251	return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
252}
253
254int
255elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
256    int type, elf_lookup_fn lookup)
257{
258
259	return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
260}
261
262int
263elf_cpu_load_file(linker_file_t lf)
264{
265
266	/*
267	 * The pmap code does not do an icache sync upon establishing executable
268	 * mappings in the kernel pmap.  It's an optimization based on the fact
269	 * that kernel memory allocations always have EXECUTABLE protection even
270	 * when the memory isn't going to hold executable code.  The only time
271	 * kernel memory holding instructions does need a sync is after loading
272	 * a kernel module, and that's when this function gets called.
273	 *
274	 * This syncs data and instruction caches after loading a module.  We
275	 * don't worry about the kernel itself (lf->id is 1) as locore.S did
276	 * that on entry.  Even if data cache maintenance was done by IO code,
277	 * the relocation fixup process creates dirty cache entries that we must
278	 * write back before doing icache sync. The instruction cache sync also
279	 * invalidates the branch predictor cache on platforms that have one.
280	 */
281	if (lf->id == 1)
282		return (0);
283#if __ARM_ARCH >= 6
284	dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
285	icache_inv_all();
286#else
287	cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
288	cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
289	cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
290#endif
291	return (0);
292}
293
294int
295elf_cpu_unload_file(linker_file_t lf __unused)
296{
297
298	return (0);
299}
300