/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#define DEBUGP(fmt, a...)

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */


/* Block module loading/unloading? */
int modules_disabled = 0;

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding up __module_address().
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long *strmap;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): returns 0 on success,
   -EBUSY while the module is still initializing (caller may retry),
   -ENOENT if initialization failed or unloading has begun. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}
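
/*
 * Caller pattern sketch (see resolve_symbol_wait() further down):
 * -EBUSY is a "retry later" signal for a module whose init has not
 * finished, while -ENOENT is a hard failure.
 *
 *	err = strong_try_module_get(owner);
 *	if (err == -EBUSY)
 *		... wait on module_wq and retry ...
 *	else if (err)
 *		... give up ...
 */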

static inline void add_taint_module(struct module *mod, unsigned flag)
{
	add_taint(flag);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
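
/*
 * Typical use (sketch, mirroring how load_module() picks up an
 * exported symbol table elsewhere in this file):
 *
 *	mod->syms = section_objs(info, "__ksymtab",
 *				 sizeof(*mod->syms), &mod->num_syms);
 */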

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      unsigned int symnum, void *data),
				   void *data)
{
	unsigned int i, j;

	for (j = 0; j < arrsize; j++) {
		for (i = 0; i < arr[j].stop - arr[j].start; i++)
			if (fn(&arr[j], owner, i, data))
				return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
			    unsigned int symnum, void *data), void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol);

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
		return false;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate if this is the right API to use and "
		       "if it really is, report it to the linux kernel "
		       "mailing list together with submitting your code for "
		       "inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	DEBUGP("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
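
/*
 * Minimal usage sketch (see __symbol_get() below for a real caller);
 * the RCU list walk in each_symbol() is why disabling preemption is
 * a sufficient guard here:
 *
 *	preempt_disable();
 *	sym = find_symbol("printk", &owner, NULL, true, false);
 *	preempt_enable();
 */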

/* Search for module by name: must hold module_mutex. */
struct module *find_module(const char *name)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod,
			   unsigned long size, unsigned long align)
{
	if (align > PAGE_SIZE) {
		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
		       mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(size, align);
	if (!mod->percpu) {
		printk(KERN_WARNING
		       "%s: Could not allocate %lu bytes percpu data\n",
		       mod->name, size);
		return -ENOMEM;
	}
	mod->percpu_size = size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static inline int percpu_modalloc(struct module *mod,
				  unsigned long size, unsigned long align)
{
	return -ENOMEM;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
	                struct module *mod, char *buffer)             \
{                                                                     \
	return sprintf(buffer, "%s\n", mod->field);                   \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
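
/*
 * Each MODINFO_ATTR(field) expansion generates, e.g. for "version":
 * setup_modinfo_version(), show_modinfo_version(),
 * modinfo_version_exists() and free_modinfo_version(), plus the
 * modinfo_version attribute wired into modinfo_attrs[] below, which
 * backs /sys/module/<name>/version.
 */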

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	__this_cpu_write(mod->refptr->incs, 1);
	/* Backwards compatibility macros put refcount during init. */
	mod->waiter = current;

	return 0;
}

/* Does module 'a' already use module 'b'? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			DEBUGP("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	DEBUGP("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who is using it), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	DEBUGP("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		printk(KERN_WARNING "%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		DEBUGP("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref {
	struct module *mod;
	int flags;
	int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	if (flags & O_NONBLOCK) {
		struct stopref sref = { mod, flags, forced };

		return stop_machine(__try_stop_module, &sref, NULL);
	} else {
		/* We don't need to stop the machine for this. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		return 0;
	}
}


unsigned int module_refcount(struct module *mod)
{
	unsigned int incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

static void wait_for_zero_refcount(struct module *mod)
{
	/* Since we might sleep for some time, release the mutex first */
	mutex_unlock(&module_mutex);
	for (;;) {
		DEBUGP("Looking at refcount...\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (module_refcount(mod) == 0)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	mutex_lock(&module_mutex);
}

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		DEBUGP("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Set this up before setting mod->state */
	mod->waiter = current;

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Never wait if forced. */
	if (!forced && module_refcount(mod) != 0)
		wait_for_zero_refcount(mod);

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
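
/*
 * Userspace enters here via the delete_module(2) syscall; rmmod does
 * roughly this (illustrative only):
 *
 *	syscall(__NR_delete_module, "foo", O_NONBLOCK);
 *
 * O_NONBLOCK selects the stop_machine() path in try_stop_module();
 * O_TRUNC additionally forces the unload when
 * CONFIG_MODULE_FORCE_UNLOAD is set (see try_force_unload()).
 */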

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %u ", module_refcount(mod));

	/* Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format. */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_printf(m, "[permanent],");
	}

	if (!printed_something)
		seq_printf(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module *mod, char *buffer)
{
	return sprintf(buffer, "%u\n", module_refcount(mod));
}

static struct module_attribute refcnt = {
	.attr = { .name = "refcnt", .mode = 0444 },
	.show = show_refcnt,
};

void module_put(struct module *module)
{
	if (module) {
		preempt_disable();
		smp_wmb(); /* see comment in module_refcount */
		__this_cpu_inc(module->refptr->decs);

		trace_module_put(module, _RET_IP_);
		/* Maybe they're waiting for us to drop reference? */
		if (unlikely(!module_is_live(module)))
			wake_up_process(module->waiter);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
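
/*
 * Pairing sketch for the get/put API (assuming a hypothetical 'ops'
 * structure with an 'owner' field, as many kernel interfaces have):
 *
 *	if (!try_module_get(ops->owner))
 *		return -ENODEV;
 *	ops->func();
 *	module_put(ops->owner);
 */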

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static ssize_t show_initstate(struct module_attribute *mattr,
			   struct module *mod, char *buffer)
{
	const char *state = "unknown";

	switch (mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute initstate = {
	.attr = { .name = "initstate", .mode = 0444 },
	.show = show_initstate,
};

static struct module_attribute *modinfo_attrs[] = {
	&modinfo_version,
	&modinfo_srcversion,
	&initstate,
#ifdef CONFIG_MODULE_UNLOAD
	&refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		printk(KERN_WARNING "%s: %s: kernel tainted.\n",
		       mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply them. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		DEBUGP("Found checksum %lX vs module %lX\n",
		       maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	printk(KERN_WARNING "%s: no symbol version for %s\n",
	       mod->name, symname);
	return 0;

bad_version:
	printk(KERN_WARNING "%s: disagrees about version of symbol %s\n",
	       mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex, "module_layout", mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
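
/*
 * For illustration: vermagic strings look roughly like
 * "2.6.36 SMP mod_unload ARMv7 " (exact contents depend on the
 * config); with CRCs present, same_magic() skips past the first
 * space, i.e. the kernel version, and compares only the feature
 * flags that follow.
 */
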
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make the copy under the lock if we failed to get a ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
		       mod->name, owner);
	}
	return ksym;
}

/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr {
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module *mod, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%lx\n", sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
  out:
	free_sect_attrs(sect_attrs);
}
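
/*
 * The result is one read-only file per loaded section, for example
 * (address purely illustrative):
 *
 *	# cat /sys/module/foo/sections/.text
 *	0xbf000000
 */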

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

  out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test || attr->test(mod)) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj, &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		printk(KERN_ERR "%s: module sysfs not initialized\n",
		       mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		printk(KERN_ERR "%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		kobject_put(&mod->mkobj.kobj);

	/* delay uevent until full sysfs population */
out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	kobject_put(&mod->mkobj.kobj);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	kobject_put(&mod->mkobj.kobj);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

/*
 * unlink the module while the whole machine is stopped with interrupts off
 * - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	/* Delete from various lists */
	mutex_lock(&module_mutex);
	stop_machine(__unlink_module, mod, NULL);
	mutex_unlock(&module_mutex);
	mod_sysfs_teardown(mod);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* This may be NULL, but that's OK */
	module_free(mod, mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes: */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	module_free(mod, mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}

void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				printk(KERN_ERR
				       "%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			DEBUGP("Common symbol: %s\n", name);
			printk(KERN_WARNING "%s: please compile with -fno-common\n",
			       mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			DEBUGP("Absolute symbol: 0x%08lx\n",
			       (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
			       mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}

static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}
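
/*
 * Worked example (assuming arch_mod_section_prepend() returns 0):
 * with *size == 10 and a section where sh_addralign == 8 and
 * sh_size == 100, get_offset() returns 16 (10 rounded up to a
 * multiple of 8) and leaves *size == 116.
 */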

/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	DEBUGP("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			DEBUGP("\t%s\n", sname);
		}
		if (m == 0)
			mod->core_text_size = mod->core_size;
	}

	DEBUGP("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			DEBUGP("\t%s\n", sname);
		}
		if (m == 0)
			mod->init_text_size = mod->init_size;
	}
}

static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

#ifndef CONFIG_LOCKDEP
	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			printk(KERN_WARNING "%s: module license '%s' taints "
				"kernel.\n", mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
	}
#endif
}

/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}
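
/*
 * The .modinfo section is a sequence of NUL-terminated "tag=value"
 * strings, e.g. "license=GPL\0author=...\0"; get_modinfo(info,
 * "license") would return a pointer to "GPL".
 */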
1847
1848static void setup_modinfo(struct module *mod, struct load_info *info)
1849{
1850	struct module_attribute *attr;
1851	int i;
1852
1853	for (i = 0; (attr = modinfo_attrs[i]); i++) {
1854		if (attr->setup)
1855			attr->setup(mod, get_modinfo(info, attr->attr.name));
1856	}
1857}
1858
1859static void free_modinfo(struct module *mod)
1860{
1861	struct module_attribute *attr;
1862	int i;
1863
1864	for (i = 0; (attr = modinfo_attrs[i]); i++) {
1865		if (attr->free)
1866			attr->free(mod);
1867	}
1868}
1869
1870#ifdef CONFIG_KALLSYMS
1871
1872/* lookup symbol in given range of kernel_symbols */
1873static const struct kernel_symbol *lookup_symbol(const char *name,
1874	const struct kernel_symbol *start,
1875	const struct kernel_symbol *stop)
1876{
1877	const struct kernel_symbol *ks = start;
1878	for (; ks < stop; ks++)
1879		if (strcmp(ks->name, name) == 0)
1880			return ks;
1881	return NULL;
1882}
1883
1884static int is_exported(const char *name, unsigned long value,
1885		       const struct module *mod)
1886{
1887	const struct kernel_symbol *ks;
1888	if (!mod)
1889		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
1890	else
1891		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
1892	return ks != NULL && ks->value == value;
1893}
1894
1895/* As per nm */
1896static char elf_type(const Elf_Sym *sym, const struct load_info *info)
1897{
1898	const Elf_Shdr *sechdrs = info->sechdrs;
1899
1900	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
1901		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
1902			return 'v';
1903		else
1904			return 'w';
1905	}
1906	if (sym->st_shndx == SHN_UNDEF)
1907		return 'U';
1908	if (sym->st_shndx == SHN_ABS)
1909		return 'a';
1910	if (sym->st_shndx >= SHN_LORESERVE)
1911		return '?';
1912	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
1913		return 't';
1914	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
1915	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
1916		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
1917			return 'r';
1918		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
1919			return 'g';
1920		else
1921			return 'd';
1922	}
1923	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
1924		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
1925			return 's';
1926		else
1927			return 'b';
1928	}
1929	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
1930		      ".debug")) {
1931		return 'n';
1932	}
1933	return '?';
1934}
1935
1936static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
1937                           unsigned int shnum)
1938{
1939	const Elf_Shdr *sec;
1940
1941	if (src->st_shndx == SHN_UNDEF
1942	    || src->st_shndx >= shnum
1943	    || !src->st_name)
1944		return false;
1945
1946	sec = sechdrs + src->st_shndx;
1947	if (!(sec->sh_flags & SHF_ALLOC)
1948#ifndef CONFIG_KALLSYMS_ALL
1949	    || !(sec->sh_flags & SHF_EXECINSTR)
1950#endif
1951	    || (sec->sh_entsize & INIT_OFFSET_MASK))
1952		return false;
1953
1954	return true;
1955}

static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);
	/* Walk symbols 1..nsrc-1; symbol 0 is the ELF null symbol. */
	for (ndst = i = 1; i < nsrc; ++i)
		if (is_core_symbol(&src[i], info->sechdrs, info->hdr->e_shnum)) {
			unsigned int j = src[i].st_name;

			while (!__test_and_set_bit(j, info->strmap)
			       && info->strtab[j])
				++j;
			++ndst;
		}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
	mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);

	/* Append room for core symbols' strings at end of core part. */
	info->stroffs = mod->core_size;
	__set_bit(0, info->strmap);
	mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
}
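
/*
 * A worked example of the strmap compaction above (offsets are
 * hypothetical): if the only surviving string starts at strtab offset
 * 10, bits 10..10+len are set in strmap, and bitmap_weight(strmap, 10)
 * is 1 (just the leading NUL at bit 0), so that string lands at offset
 * 1 of the compacted core strtab which add_kallsyms() builds below.
 */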

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	mod->symtab = (void *)symsec->sh_addr;
	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->num_symtab; i++)
		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);

	mod->core_symtab = dst = mod->module_core + info->symoffs;
	src = mod->symtab;
	*dst = *src;
	/* Walk symbols 1..num_symtab-1; symbol 0 was copied above. */
	for (ndst = i = 1; i < mod->num_symtab; ++i) {
		if (!is_core_symbol(&src[i], info->sechdrs, info->hdr->e_shnum))
			continue;
		dst[ndst] = src[i];
		dst[ndst].st_name = bitmap_weight(info->strmap,
						  dst[ndst].st_name);
		++ndst;
	}
	mod->core_num_syms = ndst;

	mod->core_strtab = s = mod->module_core + info->stroffs;
	for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
		if (test_bit(i, info->strmap))
			*++s = mod->strtab[i];
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static inline void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */

static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		printk(KERN_ERR "dynamic debug error adding module: %s\n",
					debug->modname);
#endif
}

static void dynamic_debug_remove(struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(debug->modname);
}

static void *module_alloc_update_bounds(unsigned long size)
{
	void *ret = module_alloc(size);

	if (ret) {
		mutex_lock(&module_mutex);
		/* Update module bounds. */
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
		mutex_unlock(&module_mutex);
	}
	return ret;
}
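
/*
 * The [module_addr_min, module_addr_max) bounds maintained above let
 * __module_address() reject most non-module addresses with two
 * comparisons before it ever walks the module list.
 */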

#ifdef CONFIG_DEBUG_KMEMLEAK
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		const char *name = info->secstrings + info->sechdrs[i].sh_name;
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
			continue;
		if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif

/* Sets info->hdr and info->len. */
static int copy_and_check(struct load_info *info,
			  const void __user *umod, unsigned long len,
			  const char __user *uargs)
{
	int err;
	Elf_Ehdr *hdr;

	if (len < sizeof(*hdr))
		return -ENOEXEC;

	/* Suck in entire file: we'll want most of it. */
	/* vmalloc chokes on unreasonable sizes, so cap the length here. */
	if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
		return -ENOMEM;

	if (copy_from_user(hdr, umod, len) != 0) {
		err = -EFAULT;
		goto free_hdr;
	}

	/* Sanity checks: reject anything that isn't a relocatable ELF
	   object for this architecture with sane section headers. */
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || hdr->e_type != ET_REL
	    || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(Elf_Shdr)) {
		err = -ENOEXEC;
		goto free_hdr;
	}

	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
		err = -ENOEXEC;
		goto free_hdr;
	}

	info->hdr = hdr;
	info->len = len;
	return 0;

free_hdr:
	vfree(hdr);
	return err;
}

static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}

static int rewrite_section_headers(struct load_info *info)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			printk(KERN_ERR "Module len %lu truncated\n",
			       info->len);
			return -ENOEXEC;
		}

		/* Point each section's sh_addr at its contents within
		   the temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}

/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		printk(KERN_WARNING "No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	if (info->index.sym == 0) {
		printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
		       mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}

static int check_modinfo(struct module *mod, struct load_info *info)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP);
		printk(KERN_WARNING "%s: module is from the staging directory,"
		       " the quality is unknown, you have been warned.\n",
		       mod->name);
	}

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}
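
/*
 * An illustrative vermagic string (exact contents depend on the build)
 * looks like "2.6.36 SMP mod_unload modversions": the kernel release
 * plus the configuration flags that affect binary compatibility, which
 * same_magic() compares against the running kernel's copy.
 */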

static void find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints = section_objs(info, "__tracepoints",
					sizeof(*mod->tracepoints),
					&mod->num_tracepoints);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	/*
	 * This section contains pointers to allocated objects in the trace
	 * code and not scanning it leads to false positives.
	 */
	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
			   mod->num_trace_events, GFP_KERNEL);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
		       mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);
}
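
/*
 * All the lookups above share one pattern: section_objs() finds the
 * named section and returns its contents as an array, storing
 * sh_size / object-size in the count.  A missing section resolves to
 * section 0, whose size is always zero, yielding a count of 0 -- which
 * is what makes every one of these sections optional.
 */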

static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc_update_bounds(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	ptr = module_alloc_update_bounds(mod->init_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. This block doesn't need to be
	 * scanned as it contains data and code that will be freed
	 * after the module is initialized.
	 */
	kmemleak_ignore(ptr);
	if (!ptr && mod->init_size) {
		module_free(mod, mod->module_core);
		return -ENOMEM;
	}
	memset(ptr, 0, mod->init_size);
	mod->module_init = ptr;

	/* Transfer each section which specifies SHF_ALLOC */
	DEBUGP("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

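		/*
		 * Decode the offset stashed in sh_entsize by the layout
		 * pass: e.g. (0x40 | INIT_OFFSET_MASK) means 0x40 bytes
		 * into the init region, while a plain 0x40 means 0x40
		 * bytes into the core region.  (Offsets illustrative.)
		 */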
		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		DEBUGP("\t0x%lx %s\n",
		       shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}

static int check_module_license_and_versions(struct module *mod)
{
	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}

static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}

static struct module *layout_and_allocate(struct load_info *info)
{
	/* Module within temporary copy. */
	struct module *mod;
	Elf_Shdr *pcpusec;
	int err;

	mod = setup_load_info(info);
	if (IS_ERR(mod))
		return mod;

	err = check_modinfo(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		goto out;

	pcpusec = &info->sechdrs[info->index.pcpu];
	if (pcpusec->sh_size) {
		/* We have a special allocation for this section. */
		err = percpu_modalloc(mod,
				      pcpusec->sh_size, pcpusec->sh_addralign);
		if (err)
			goto out;
		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
	}

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; no architecture appears to need a
	   special case. */
	layout_sections(mod, info);

	info->strmap = kzalloc(BITS_TO_LONGS(info->sechdrs[info->index.str].sh_size)
			 * sizeof(long), GFP_KERNEL);
	if (!info->strmap) {
		err = -ENOMEM;
		goto free_percpu;
	}
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		goto free_strmap;

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;

free_strmap:
	kfree(info->strmap);
free_percpu:
	percpu_modfree(mod);
out:
	return ERR_PTR(err);
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	kfree(info->strmap);
	percpu_modfree(mod);
	module_free(mod, mod->module_init);
	module_free(mod, mod->module_core);
}

static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}

/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections. */
static struct module *load_module(void __user *umod,
				  unsigned long len,
				  const char __user *uargs)
{
	struct load_info info = { NULL, };
	struct module *mod;
	long err;

	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);

	/* Copy in the blobs from userspace, check they are vaguely sane. */
	err = copy_and_check(&info, umod, len, uargs);
	if (err)
		return ERR_PTR(err);

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(&info);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto free_module;

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	find_module_sections(mod, &info);

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, &info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, &info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, &info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, &info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	/* Mark state as coming so strong_try_module_get() ignores us. */
	mod->state = MODULE_STATE_COMING;

	/* Now sew it into the lists so we can get lockdep and oops
	 * info during argument parsing.  No one should access us, since
	 * strong_try_module_get() will fail.
	 * lockdep/oops can run asynchronously, so use the RCU list insertion
	 * function to insert in a way safe for concurrent readers.
	 * The mutex protects against concurrent writers.
	 */
	mutex_lock(&module_mutex);
	if (find_module(mod->name)) {
		err = -EEXIST;
		goto unlock;
	}

	/* This has to be done once we're sure module name is unique. */
	if (!mod->taints)
		dynamic_debug_setup(info.debug, info.num_debug);

	/* Find duplicate symbols */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto ddebug;

	module_bug_finalize(info.hdr, info.sechdrs, mod);
	list_add_rcu(&mod->list, &modules);
	mutex_unlock(&module_mutex);

	/* Module is ready to execute: parsing args may do that. */
	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
	if (err < 0)
		goto unlink;

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp);
	if (err < 0)
		goto unlink;

	/* Get rid of temporary copy and strmap. */
	kfree(info.strmap);
	free_copy(&info);

	/* Done! */
	trace_module_load(mod);
	return mod;

 unlink:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	module_bug_cleanup(mod);

 ddebug:
	if (!mod->taints)
		dynamic_debug_remove(info.debug);
 unlock:
	mutex_unlock(&module_mutex);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 free_module:
	module_deallocate(mod, &info);
 free_copy:
	free_copy(&info);
	return ERR_PTR(err);
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}

/* This is where the real work happens */
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	struct module *mod;
	int ret = 0;

	/* Must have permission */
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	/* Do all the hard work */
	mod = load_module(umod, len, uargs);
	if (IS_ERR(mod))
		return PTR_ERR(mod);

	blocking_notifier_call_chain(&module_notify_list,
			MODULE_STATE_COMING, mod);

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		/* Init routine failed: abort.  Try to protect us from
		   buggy refcounters. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		module_put(mod);
		blocking_notifier_call_chain(&module_notify_list,
					     MODULE_STATE_GOING, mod);
		free_module(mod);
		wake_up(&module_wq);
		return ret;
	}
	if (ret > 0) {
		printk(KERN_WARNING
"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
"%s: loading module anyway...\n",
		       __func__, mod->name, ret,
		       __func__);
		dump_stack();
	}

	/* Now it's a first class citizen!  Wake up anyone waiting for it. */
	mod->state = MODULE_STATE_LIVE;
	wake_up(&module_wq);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/* We need to finish all async code before the module init sequence is done */
	async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	mod->num_symtab = mod->core_num_syms;
	mod->symtab = mod->core_symtab;
	mod->strtab = mod->core_strtab;
#endif
	module_free(mod, mod->module_init);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_text_size = 0;
	mutex_unlock(&module_mutex);

	return 0;
}
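
/*
 * For reference, a minimal (hypothetical) user-space caller: insmod
 * essentially reads the whole .ko file into memory and invokes this
 * system call.  A sketch, with error handling elided:
 *
 *	void *image = ...;        // entire .ko read into memory
 *	unsigned long len = ...;  // its size in bytes
 *	syscall(__NR_init_module, image, len, "param=value");
 *
 * where "param=value" stands in for any module parameters (pass ""
 * for none).
 */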

static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	return str[0] == '$' && strchr("atd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}

static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;

	/* At worst, the next value is at the end of the module. */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for the closest preceding symbol and the next symbol.
	   (ELF starts real symbols at 1.) */
	for (i = 1; i < mod->num_symtab; i++) {
		if (mod->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (mod->symtab[i].st_value <= addr
		    && mod->symtab[i].st_value > mod->symtab[best].st_value
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			best = i;
		if (mod->symtab[i].st_value > addr
		    && mod->symtab[i].st_value < nextval
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			nextval = mod->symtab[i].st_value;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - mod->symtab[best].st_value;
	if (offset)
		*offset = addr - mod->symtab[best].st_value;
	return mod->strtab + mod->symtab[best].st_name;
}

/* For kallsyms to ask for address resolution.  NULL means not found.
 * Be careful not to take locks (we may be oopsing); simply disable
 * preemption. */
const char *module_address_lookup(unsigned long addr,
			    unsigned long *size,
			    unsigned long *offset,
			    char **modname,
			    char *namebuf)
{
	struct module *mod;
	const char *ret = NULL;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module_init(addr, mod) ||
		    within_module_core(addr, mod)) {
			if (modname)
				*modname = mod->name;
			ret = get_ksymbol(mod, addr, size, offset);
			break;
		}
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();
	return ret;
}

int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module_init(addr, mod) ||
		    within_module_core(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module_init(addr, mod) ||
		    within_module_core(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
			char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (symnum < mod->num_symtab) {
			*value = mod->symtab[symnum].st_value;
			*type = mod->symtab[symnum].st_info;
			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
				KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= mod->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}

static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;

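	/*
	 * Note: add_kallsyms() overwrote each st_info with the nm-style
	 * type character from elf_type(), so 'U' here means "undefined".
	 */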
	for (i = 0; i < mod->num_symtab; i++)
		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
		    mod->symtab[i].st_info != 'U')
			return mod->symtab[i].st_value;
	return 0;
}

/* Look for this name, which can be of the form module:name
   (e.g. "foo:bar"). */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		*colon = '\0';
		if ((mod = find_module(name)) != NULL)
			ret = mod_find_symname(mod, colon+1);
		*colon = ':';
	} else {
		list_for_each_entry_rcu(mod, &modules, list)
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
	}
	preempt_enable();
	return ret;
}

int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */

static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
			buf[bx++] = 'P';
		if (mod->taints & (1 << TAINT_FORCED_MODULE))
			buf[bx++] = 'F';
		if (mod->taints & (1 << TAINT_CRAP))
			buf[bx++] = 'C';
		/*
		 * TAINT_FORCED_RMMOD: could be added.
		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
		 * apply to modules.
		 */

		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}
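
/*
 * Illustrative outputs: a proprietary module shows "(P)", a force-loaded
 * staging module that is still initialising shows "(FC+)", and an
 * untainted live module yields the empty string.
 */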

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	seq_printf(m, "%s %u",
		   mod->name, mod->init_size + mod->core_size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading":
		   mod->state == MODULE_STATE_COMING ? "Loading":
		   "Live");
	/* Used by oprofile and other similar tools. */
	seq_printf(m, " 0x%p", mod->module_core);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_printf(m, "\n");
	return 0;
}

/* Format: modulename size refcount deps state address

   Where refcount is a number or -, and deps is a comma-separated list
   of the modules using this one, or -.  An illustrative line:

	mymod 12345 1 otherdep, Live 0xffffffffa0000000
*/
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif

/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->num_exentries == 0)
			continue;

		e = search_extable(mod->extable,
				   mod->extable + mod->num_exentries - 1,
				   addr);
		if (e)
			break;
	}
	preempt_enable();

	/* If we found one, we are currently running inside it, so the
	   module cannot be unloaded out from under us; no refcount needed. */
	return e;
}

/*
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	list_for_each_entry_rcu(mod, &modules, list)
		if (within_module_core(addr, mod)
		    || within_module_init(addr, mod))
			return mod;
	return NULL;
}
EXPORT_SYMBOL_GPL(__module_address);

/*
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->module_init, mod->init_text_size)
		    && !within(addr, mod->module_core, mod->core_text_size))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[8];

	/* Show each module name and core base address for
	 * GDB source code trace dump
	 */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		printk("module:  %s\t %p\t %u\n",
			mod->name,
			mod->module_core,
			mod->init_size + mod->core_size);
	}
	preempt_enable();

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list)
		printk(" %s%s", mod->name, module_flags(mod, buf));
	preempt_enable();
	if (last_unloaded_module[0])
		printk(" [last unloaded: %s]", last_unloaded_module);
	printk("\n");
}

#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif

#ifdef CONFIG_TRACEPOINTS
void module_update_tracepoints(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry(mod, &modules, list)
		if (!mod->taints)
			tracepoint_update_probe_range(mod->tracepoints,
				mod->tracepoints + mod->num_tracepoints);
	mutex_unlock(&module_mutex);
}

/*
 * Returns 0 if current not found.
 * Returns 1 if current found.
 */
int module_get_iter_tracepoints(struct tracepoint_iter *iter)
{
	struct module *iter_mod;
	int found = 0;

	mutex_lock(&module_mutex);
	list_for_each_entry(iter_mod, &modules, list) {
		if (!iter_mod->taints) {
			/*
			 * Resume where the previous walk stopped: skip
			 * entries before the saved module cursor
			 * (modules are compared by pointer here).
			 */
			if (iter_mod < iter->module)
				continue;
			else if (iter_mod > iter->module)
				iter->tracepoint = NULL;
			found = tracepoint_get_iter_range(&iter->tracepoint,
				iter_mod->tracepoints,
				iter_mod->tracepoints
					+ iter_mod->num_tracepoints);
			if (found) {
				iter->module = iter_mod;
				break;
			}
		}
	}
	mutex_unlock(&module_mutex);
	return found;
}
#endif