// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/sort.h>

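/*
 * Generate the ADRP/ADD instruction pair that materialises the address 'dst'
 * in register 'reg', assuming the pair will be placed at address 'pc': the
 * ADRP yields the 4 KB page of the target and the ADD supplies the low bits.
 */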
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
					    enum aarch64_insn_register reg)
{
	u32 adrp, add;

	adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
	add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_ADSB_ADD);

	return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}

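/*
 * Build a full PLT entry (ADRP/ADD/BR) located at 'pc' that transfers control
 * to 'dst' via x16. The BR x16 opcode never varies, so it is generated once
 * and cached in a static variable.
 */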
struct plt_entry get_plt_entry(u64 dst, void *pc)
{
	struct plt_entry plt;
	static u32 br;

	if (!br)
		br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
						 AARCH64_INSN_BRANCH_NOLINK);

	plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
	plt.br = cpu_to_le32(br);

	return plt;
}

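/* Return whether two PLT entries branch to the same target address. */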
static bool plt_entries_equal(const struct plt_entry *a,
			      const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}

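/*
 * Allocate a PLT entry for the branch relocation 'rela' at 'loc' and return
 * its address, placing it in the module's init or core PLT section depending
 * on where 'loc' resides. Since the relocations are sorted, an entry for the
 * same target can only be the one emitted immediately before, in which case
 * it is reused instead.
 */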
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
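/*
 * Emit a veneer for an ADRP instruction at 'loc' that would otherwise end up
 * at an offset affected by erratum 843419: compute the target page address
 * into the ADRP's original destination register from a PLT slot at a safe
 * offset, then branch back to the instruction following the ADRP.
 */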
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif

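/* three-way compare; avoids the truncation a plain subtraction could suffer
 * with 64-bit fields */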
#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

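/*
 * Count the PLT entries the relocations against section 'dstidx' may require:
 * one per out-of-section branch target (modulo duplicates), plus, when the
 * erratum 843419 workaround is in effect, veneer slots for ADRP instructions
 * that cannot be made safe by raising the section alignment.
 */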
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xff8 and != 0xffc (which
			 * would have all ones in bits [11:3]).
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
	}

	return ret;
}

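/*
 * Return whether this relocation is a JUMP26/CALL26 branch against a symbol
 * defined outside the section being relocated, i.e. one that may need a PLT.
 */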
static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

	if (s->st_shndx == dstidx)
		return false;

	return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
	       ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}

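/*
 * Module loader hook: size the .plt and .init.plt sections (and the ftrace
 * trampoline section, if present) ahead of relocation, so that
 * apply_relocate_add() can carve PLT entries and veneers out of them.
 */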
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * sort branch relocations requiring a PLT by type, symbol index
		 * and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

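	/* reserve space for the PLT entries used as ftrace trampolines */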
	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}