// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for PPC.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/code-patching.h>

/* Count how many different relocations (different symbol, different
   addend) */
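/* Assumes the rela entries have been sorted with relacmp() below, so that
   duplicate (symbol, addend) pairs are adjacent and counted only once. */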
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF32_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

#ifdef CONFIG_DYNAMIC_FTRACE
	_count_relocs++;	/* add one for ftrace_caller */
#endif
	return _count_relocs;
}

static int relacmp(const void *_x, const void *_y)
{
	const Elf32_Rela *x, *y;

	y = (Elf32_Rela *)_x;
	x = (Elf32_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

/* Get the size of the trampolines (PLT entries) potentially required by
   either the init or the non-init (core) sections, as selected by is_init */
static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
				  const Elf32_Shdr *sechdrs,
				  const char *secstrings,
				  int is_init)
{
	unsigned long ret = 0;
	unsigned i;

	/* Everything marked ALLOC (this includes the exported
           symbols) */
	for (i = 1; i < hdr->e_shnum; i++) {
		/* If it's called *.init*, and we're not init, we're
                   not interested */
		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
		    != is_init)
			continue;

		/* We don't want to look at debug sections. */
		if (strstr(secstrings + sechdrs[i].sh_name, ".debug"))
			continue;

		if (sechdrs[i].sh_type == SHT_RELA) {
			pr_debug("Found relocations in section %u\n", i);
			pr_debug("Ptr: %p.  Number: %u\n",
			       (void *)hdr + sechdrs[i].sh_offset,
			       sechdrs[i].sh_size / sizeof(Elf32_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key. This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)hdr + sechdrs[i].sh_offset,
			     sechdrs[i].sh_size / sizeof(Elf32_Rela),
			     sizeof(Elf32_Rela), relacmp, NULL);

			ret += count_relocs((void *)hdr
					     + sechdrs[i].sh_offset,
					     sechdrs[i].sh_size
					     / sizeof(Elf32_Rela))
				* sizeof(struct ppc_plt_entry);
		}
	}

	return ret;
}

int module_frob_arch_sections(Elf32_Ehdr *hdr,
			      Elf32_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .plt and .init.plt sections */
	for (i = 0; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
			me->arch.init_plt_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
			me->arch.core_plt_section = i;
	}
	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
		pr_err("Module doesn't contain .plt or .init.plt sections.\n");
		return -ENOEXEC;
	}

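	/*
	 * Each distinct R_PPC_REL24 (symbol, addend) target counted by
	 * get_plt_size() may need its own trampoline; the entries themselves
	 * are written lazily by do_plt_call() when a branch turns out to be
	 * out of range.
	 */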
	/* Override their sizes */
	sechdrs[me->arch.core_plt_section].sh_size
		= get_plt_size(hdr, sechdrs, secstrings, 0);
	sechdrs[me->arch.init_plt_section].sh_size
		= get_plt_size(hdr, sechdrs, secstrings, 1);
	return 0;
}

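/* Does this PLT entry already load 'val' (i.e. the same lis/addi pair)? */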
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
	if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
		return 0;
	if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
		return 0;
	return 1;
}

/* Set up a trampoline in the PLT to bounce us to the distant function */
static uint32_t do_plt_call(void *location,
			    Elf32_Addr val,
			    const Elf32_Shdr *sechdrs,
			    struct module *mod)
{
	struct ppc_plt_entry *entry;

	pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
	/* Init, or core PLT? */
	if (within_module_core((unsigned long)location, mod))
		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
	else
		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;

	/* Find this entry, or if that fails, the next avail. entry */
	while (entry->jump[0]) {
		if (entry_matches(entry, val))
			return (uint32_t)entry;
		entry++;
	}

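	/*
	 * Found a free slot: write the trampoline, which loads the full
	 * 32-bit target into r12 (lis + addi), moves it to CTR and
	 * branches there.
	 */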
	if (patch_instruction(&entry->jump[0], ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(val)))))
		return 0;
	if (patch_instruction(&entry->jump[1], ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))))
		return 0;
	if (patch_instruction(&entry->jump[2], ppc_inst(PPC_RAW_MTCTR(_R12))))
		return 0;
	if (patch_instruction(&entry->jump[3], ppc_inst(PPC_RAW_BCTR())))
		return 0;

	pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
	return (uint32_t)entry;
}

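/*
 * Patch the 16-bit immediate of a half-word relocation.  patch_instruction()
 * only writes whole words, so align down to the containing word and replace
 * its low 16 bits.
 */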
static int patch_location_16(uint32_t *loc, u16 value)
{
	loc = PTR_ALIGN_DOWN(loc, sizeof(u32));
	return patch_instruction(loc, ppc_inst((*loc & 0xffff0000) | value));
}

int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *module)
{
	unsigned int i;
	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;
	uint32_t value;

	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rela[i].r_info);
		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF32_R_TYPE(rela[i].r_info)) {
		case R_PPC_ADDR32:
			/* Simply set it */
			*(uint32_t *)location = value;
			break;

		case R_PPC_ADDR16_LO:
			/* Low half of the symbol */
			if (patch_location_16(location, PPC_LO(value)))
				return -EFAULT;
			break;

		case R_PPC_ADDR16_HI:
			/* Higher half of the symbol */
			if (patch_location_16(location, PPC_HI(value)))
				return -EFAULT;
			break;

		case R_PPC_ADDR16_HA:
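			/* Adjusted high half, to be paired with a sign-extending low half */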
			if (patch_location_16(location, PPC_HA(value)))
				return -EFAULT;
			break;

		case R_PPC_REL24:
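			/*
			 * Branch with a signed 26-bit displacement: reach is
			 * +/- 32MB from the branch.  If the target is further
			 * away, go via a PLT trampoline.
			 */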
			if ((int)(value - (uint32_t)location) < -0x02000000
			    || (int)(value - (uint32_t)location) >= 0x02000000) {
				value = do_plt_call(location, value,
						    sechdrs, module);
				if (!value)
					return -EFAULT;
			}

			/* Only replace bits 2 through 26 */
			pr_debug("REL24 value = %08X. location = %08X\n",
			       value, (uint32_t)location);
			pr_debug("Location before: %08X.\n",
			       *(uint32_t *)location);
			value = (*(uint32_t *)location & ~PPC_LI_MASK) |
				PPC_LI(value - (uint32_t)location);

			if (patch_instruction(location, ppc_inst(value)))
				return -EFAULT;

			pr_debug("Location after: %08X.\n",
			       *(uint32_t *)location);
			pr_debug("ie. jump to %08X+%08X = %08X\n",
				 PPC_LI(*(uint32_t *)location), (uint32_t)location,
				 PPC_LI(*(uint32_t *)location) + (uint32_t)location);
			break;

		case R_PPC_REL32:
			/* 32-bit relative jump. */
			*(uint32_t *)location = value - (uint32_t)location;
			break;

		default:
			pr_err("%s: unknown ADD relocation: %u\n",
			       module->name,
			       ELF32_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE
notrace int module_trampoline_target(struct module *mod, unsigned long addr,
				     unsigned long *target)
{
	ppc_inst_t jmp[4];

	/* Find where the trampoline jumps to */
	if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
		return -EFAULT;
	if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
		return -EFAULT;
	if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
		return -EFAULT;
	if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
		return -EFAULT;

	/* verify that this is what we expect it to be */
	if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
		return -EINVAL;
	if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
		return -EINVAL;
	if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
		return -EINVAL;
	if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
		return -EINVAL;

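	/*
	 * Reconstruct the target address: lis supplied the high 16 bits and
	 * addi the low 16 bits.  addi sign-extends its immediate, so undo
	 * that when bit 15 of the low half is set.
	 */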
	addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
	if (addr & 0x8000)
		addr -= 0x10000;

	*target = addr;

	return 0;
}

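/*
 * Install PLT trampolines for ftrace_caller (and, with
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS, ftrace_regs_caller) in the module's core
 * text, so ftrace call sites in the module can reach them.
 */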
int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
{
	module->arch.tramp = do_plt_call(module->mem[MOD_TEXT].base,
					 (unsigned long)ftrace_caller,
					 sechdrs, module);
	if (!module->arch.tramp)
		return -ENOENT;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	module->arch.tramp_regs = do_plt_call(module->mem[MOD_TEXT].base,
					      (unsigned long)ftrace_regs_caller,
					      sechdrs, module);
	if (!module->arch.tramp_regs)
		return -ENOENT;
#endif

	return 0;
}
#endif