/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU(PAGE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 *               [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef SYMBOL_PREFIX
#define VMLINUX_SYMBOL(sym) sym
#else
#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif

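/*
 * Illustrative sketch (not part of the original header): on toolchains
 * that prefix C symbols with an underscore (SYMBOL_PREFIX defined as _,
 * e.g. on blackfin), the token paste makes linker-script names match
 * what the compiler actually emits:
 *
 *	VMLINUX_SYMBOL(jiffies) = .;	-> jiffies = .   (no prefix)
 *	VMLINUX_SYMBOL(jiffies) = .;	-> _jiffies = .  (SYMBOL_PREFIX _)
 */
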
/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGN() . = ALIGN(32)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

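/*
 * Illustrative sketch (not part of the original header): these input
 * sections are fed by the __devinitdata, __cpuinitdata and __meminitdata
 * style annotations from <linux/init.h>; "mydrv_param" below is a
 * made-up name:
 *
 *	static int mydrv_param __devinitdata = 1;   placed in .devinit.data
 *
 * With CONFIG_HOTPLUG set, DEV_KEEP(init.data) expands to
 * *(.devinit.data) and the object stays in the image; otherwise
 * DEV_DISCARD(init.data) emits the same input-section pattern inside
 * the /DISCARD/ output section defined at the bottom of this file.
 */
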
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

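/*
 * Illustrative sketch (not part of the original header): ftrace consumes
 * the table bounded by the two symbols at boot, roughly like this
 * (record_call_site() is a hypothetical stand-in for the real hooks):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *	unsigned long *p;
 *
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		record_call_site(*p);
 */
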
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			 *(__trace_printk_fmt) /* trace_printk() fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(32);							\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
									\
	STRUCT_ALIGN();							\
	FTRACE_EVENTS()							\
									\
	STRUCT_ALIGN();							\
	TRACE_SYSCALLS()

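/*
 * Illustrative sketch (not part of the original header): begin/end pairs
 * like the ones above are walked as plain arrays, e.g. for tracepoints
 * (update_one_tracepoint() is a hypothetical stand-in):
 *
 *	extern struct tracepoint __start___tracepoints[];
 *	extern struct tracepoint __stop___tracepoints[];
 *	struct tracepoint *tp;
 *
 *	for (tp = __start___tracepoints; tp < __stop___tracepoints; tp++)
 *		update_one_tracepoint(tp);
 */
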
/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_ops        : AT(ADDR(.rio_ops) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;		\
		*(.rio_switch_ops)					\
		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

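/*
 * Illustrative sketch (not part of the original header): the __ksymtab*
 * regions collected by RO_DATA_SECTION() are populated by EXPORT_SYMBOL()
 * and friends, which emit roughly the following for a symbol foo:
 *
 *	static const char __kstrtab_foo[]
 *		__attribute__((section("__ksymtab_strings"))) = "foo";
 *	static const struct kernel_symbol __ksymtab_foo
 *		__attribute__((section("__ksymtab"), used))
 *		= { (unsigned long)&foo, __kstrtab_foo };
 *
 * The module loader then resolves symbols by walking the region between
 * __start___ksymtab and __stop___ksymtab.
 */
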
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld run when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

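/*
 * Illustrative sketch (not part of the original header): arch boot code
 * lands in .head.text via the __HEAD annotation from <linux/init.h>,
 * e.g. at the top of an arch's head.S:
 *
 *		__HEAD
 *	ENTRY(stext)
 *		...
 */
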
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}

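/*
 * Illustrative sketch (not part of the original header): each entry is a
 * (faulting insn, fixup) address pair emitted next to a user access, in
 * the classic pattern (shown in ARM-flavoured asm; "3f" stands for a
 * fixup label elsewhere in the file):
 *
 *	1:	ldr	r0, [r1]		@ may fault
 *		.section __ex_table, "a"
 *		.long	1b, 3f
 *		.previous
 *
 * On a fault, search_exception_tables() scans the region between
 * __start___ex_table and __stop___ex_table for the faulting PC.
 */
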
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	CPU_DISCARD(init.data)						\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	*(.init.rodata)							\
	MCOUNT_REC()							\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.rodata)

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		INITCALLS						\
		VMLINUX_SYMBOL(__initcall_end) = .;

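/*
 * Illustrative sketch (not part of the original header): init/main.c
 * runs the collected initcalls as an array of function pointers, roughly:
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *	initcall_t *fn;
 *
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		do_one_initcall(*fn);
 *
 * The numbered .initcallN.init sections provide the link-time ordering
 * of the initcall levels (core_initcall() through late_initcall()); the
 * "Ns" variants hold the corresponding _sync entries.
 */
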
#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	VMLINUX_SYMBOL(__initramfs_end) = .;
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs can put those sections in
 * earlier definitions instead.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.  If
 * @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)					\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		*(.data..percpu..page_aligned)				\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

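/*
 * Illustrative sketch (not part of the original header): x86_64 SMP
 * kernels of this era place the percpu area at a zero-based address in
 * a dedicated program header so percpu offsets stay small, e.g.:
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * Most other architectures simply use PERCPU(PAGE_SIZE) instead.
 */
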
/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32
 * configurations.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		*(.data..percpu..page_aligned)				\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}

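/*
 * Illustrative sketch (not part of the original header): a typical arch
 * invocation, with cacheline-sized alignment, page-aligned data in use
 * and the init task stack sized THREAD_SIZE:
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */
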
#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
