/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   DIR64LSB
   FPTR64LSB
   GPREL22
   LDXMOV
   LTOFF22
   LTOFF22X
   LTOFF_FPTR22
   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
   PCREL64LSB
   SECREL32LSB
   SEGREL64LSB
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline		/* drop "inline" so helpers stay out-of-line while debugging */
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0	/* original Itanium has no usable brl instruction */
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)

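/*
 * For example, R_IA64_GPREL22 is 0x2a = 0b101010: bits 0-2 give the
 * target format, 0b010 = RF_INSN22 (a 22-bit instruction immediate),
 * and bits 3-7 give the value formula, 0b101 = RV_GPREL, i.e.
 * @gprel(S + A).  do_reloc() below decodes every relocation number
 * this way.
 */
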
enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};

enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes.  */
};

#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N

/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}
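
/*
 * An IA-64 instruction address encodes the 16-byte-aligned bundle
 * address in its upper bits and the slot number (0-2) in its low bits:
 * for an insn pointer ending in ...0x12, bundle() yields ...0x10 and
 * slot() yields 2, the X slot of an MLX bundle, which is where movl
 * and brl operands live.
 */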

static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
			mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
			mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
					         | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
					         | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
					         | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
			mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
					        | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
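
/*
 * The range checks in apply_imm60/22/21b use the usual biasing trick:
 * a signed value v fits in N bits iff (uint64_t) v + (1 << (N-1)) is
 * below (1 << N), i.e. the biased value lands in [0, 2^N).  A single
 * unsigned comparison thus rejects both overflow and underflow.
 */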

#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	     mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	     br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */
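
/*
 * To recap the two PLT flavors: with brl available, a PLT entry is two
 * bundles -- set gp, then a single pc-relative brl.many whose 60-bit
 * bundle displacement reaches essentially anywhere.  Without brl, the
 * entry takes three bundles: movl the full 64-bit target IP into r16,
 * set gp, then branch indirectly through b6.
 */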

void *
module_alloc (unsigned long size)
{
	if (!size)
		return NULL;
	return vmalloc(size);
}

void
module_free (struct module *mod, void *module_region)
{
	if (mod && mod->arch.init_unw_table &&
	    module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}
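
/*
 * Note that module_free() is called both for the init region (once
 * initcalls have completed) and for the core region (at unload time).
 * The init unwind table must be removed in the first case, because its
 * text is about to vanish while the module itself lives on.
 */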

/* Have we already seen one of these relocations? */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}

/* Count how many GOT entries we may need. */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is O(n^2), but it's usually short, and not time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_LTOFF22:
		      case R_IA64_LTOFF22X:
		      case R_IA64_LTOFF64I:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* Count how many PLT entries we may need. */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is O(n^2), but it's usually short, and not time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_PCREL21B:
		      case R_IA64_PLTOFF22:
		      case R_IA64_PLTOFF64I:
		      case R_IA64_PLTOFF64MSB:
		      case R_IA64_PLTOFF64LSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is O(n^2), but it's usually short, and not time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_FPTR64I:
		      case R_IA64_FPTR32LSB:
		      case R_IA64_FPTR32MSB:
		      case R_IA64_FPTR64LSB:
		      case R_IA64_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR64LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		      case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
		else if (strcmp(".paravirt_bundles",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_bundles = s;
		else if (strcmp(".paravirt_insts",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_insts = s;
#endif

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}
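
/*
 * The .core.plt, .init.plt, .got, and .opd place-holder sections that
 * this function resizes come from the architecture's module linker
 * script (arch/ia64/module.lds).  Roughly -- a sketch, not the verbatim
 * script -- it emits one-byte place-holders that are then grown to the
 * sizes computed above:
 *
 *	SECTIONS {
 *		.core.plt : { BYTE(0) }
 *		.init.plt : { BYTE(0) }
 *		.got      : { BYTE(0) }
 *		.opd      : { BYTE(0) }
 *	}
 */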

static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}
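
/*
 * The "addr - base < size" form above is a deliberate unsigned trick:
 * if addr is below base, the subtraction wraps to a huge value, so a
 * single comparison covers both bounds of the range check.
 */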

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}

static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}

/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create new one */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}
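
/*
 * Recall that an IA-64 function pointer is the address of a function
 * descriptor -- an { ip, gp } pair -- not of the code itself.  An
 * indirect call loads both fields, which is why every module-local
 * entry point handed out of the module needs a descriptor carrying the
 * module's own gp.
 */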

static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core,
				 * if the branch won't reach, then allocate a plt for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		      default:
			val -= bundle(location);
			break;

		      case R_IA64_PCREL32MSB:
		      case R_IA64_PCREL32LSB:
		      case R_IA64_PCREL64MSB:
		      case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;

		}
		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;

	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;

	      case RV_LTV:
		/* can link-time value relocs happen here?  */
		BUG();
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against "
					"non-local symbol (%lx)\n", __func__,
					reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		      case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;

		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	      case RV_TPREL:
	      case RV_LTREL_TPREL:
	      case RV_DTPMOD:
	      case RV_LTREL_DTPMOD:
	      case RV_DTPREL:
	      case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}
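
/*
 * Worked example: for R_IA64_LTOFF22 against symbol S with addend A,
 * the formula is RV_LTREL, so val becomes the gp-relative offset of a
 * GOT slot (allocated by get_ltoff()) holding S + A, and the format
 * RF_INSN22 makes apply_imm22() patch that offset into the addl
 * instruction at "location".  At run time the module then executes
 * addl rN=@ltoff(S),gp followed by an ld8 to recover the address.
 */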

int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		uint64_t gp;
		if (mod->core_size > MAX_LTOFF)
			/*
			 * This takes advantage of the fact that ARCH_SHF_SMALL sections
			 * get allocated at the end of the module.
			 */
			gp = mod->core_size - MAX_LTOFF / 2;
		else
			gp = mod->core_size / 2;
		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}

	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}
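
/*
 * The gp placement above centers the 4 MB MAX_LTOFF window on the
 * module: a small module gets gp in the middle of its core image, while
 * a module larger than 4 MB gets gp biased toward the end, where the
 * ARCH_SHF_SMALL sections (including the GOT) are allocated, keeping
 * them within the +/-2 MB reach of a 22-bit gp-relative offset.
 */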

int
apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		unsigned int relsec, struct module *mod)
{
	printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
	return -ENOEXEC;
}

/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections, but since the two are not contiguous, we need to split this table up such
 * that we can register (and unregister) each "segment" separately.  Fortunately, this
 * sounds more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are.  */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a simple O(n^2) exchange
	 * sort (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}

int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
	if (mod->arch.paravirt_bundles) {
		struct paravirt_patch_site_bundle *start =
			(struct paravirt_patch_site_bundle *)
			mod->arch.paravirt_bundles->sh_addr;
		struct paravirt_patch_site_bundle *end =
			(struct paravirt_patch_site_bundle *)
			(mod->arch.paravirt_bundles->sh_addr +
			 mod->arch.paravirt_bundles->sh_size);

		paravirt_patch_apply_bundle(start, end);
	}
	if (mod->arch.paravirt_insts) {
		struct paravirt_patch_site_inst *start =
			(struct paravirt_patch_site_inst *)
			mod->arch.paravirt_insts->sh_addr;
		struct paravirt_patch_site_inst *end =
			(struct paravirt_patch_site_inst *)
			(mod->arch.paravirt_insts->sh_addr +
			 mod->arch.paravirt_insts->sh_size);

		paravirt_patch_apply_inst(start, end);
	}
#endif
	return 0;
}

void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}