/* Offset subtracted from a section's virtual address (VMA) to form its
 * load address (LMA) in the AT() clauses below.  An arch linker script
 * may define this before including this header; default is 0. */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
4
/* Hook for decorating linker-defined symbol names (e.g. for arches that
 * prefix C symbols).  Identity mapping unless the arch overrides it. */
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
8
/* Align . to an 8-byte boundary -- the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)
11
/* .data section: regular initialized data, plus data placed in
 * .data.init.refok (init-lifetime data explicitly marked OK to
 * reference from non-init sections). */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)
16
/*
 * Read-only data region: .rodata itself plus the read-only tables --
 * PCI fixups, RapidIO route ops, the export (ksymtab) tables and their
 * CRC (kcrctab) counterparts, exported-symbol strings, and built-in
 * module parameters.  The whole region is bracketed by
 * __start_rodata/__end_rodata and padded to 'align' at both ends
 * (RODATA below passes 4096, presumably for page-granular protection --
 * see arch usage to confirm).  Each AT() keeps the section's load
 * address LOAD_OFFSET below its virtual address.
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks: one start/end-delimited table per fixup phase */	\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol CRC table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol CRC table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol CRC table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol CRC table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol CRC table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		/* __end_rodata closes the region inside the last	\
		 * output section. */					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
									\
	. = ALIGN((align));
139
/* RODATA is provided for backward compatibility only.
 * All architectures are supposed to use RO_DATA() directly. */
#define RODATA RO_DATA(4096)
143
/* Table of security-module initcalls, bracketed by
 * __security_initcall_start/__security_initcall_end for the code that
 * walks it during boot. */
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init) 				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}
150
/* .text section.  Align to function alignment first so addresses do not
 * change during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text)						\
		*(.text.init.refok)
157
/* .sched.text is aligned to function alignment to ensure we get the
 * same addresses on the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;
165
/* .spinlock.text is aligned to function alignment to ensure we get the
 * same addresses on the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;
173
/* .kprobes.text, bracketed by __kprobes_text_start/__kprobes_text_end
 * (presumably consulted by the kprobes core -- confirm against its
 * users; this file only defines the section layout). */
#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
179
		/* DWARF debug sections.
		 * Symbols in the DWARF debugging sections are relative
		 * to the beginning of the section, so we begin each
		 * section at address 0.  */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

207
		/* Stabs debugging sections.  Like the DWARF sections
		 * above, each starts at address 0. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }
217
/* Table of bug-entry records, 8-byte aligned and bracketed by
 * __start___bug_table/__stop___bug_table.  The bracketing symbols are
 * wrapped in VMLINUX_SYMBOL() like every other symbol defined in this
 * file, so architectures that override VMLINUX_SYMBOL for symbol-name
 * mangling still produce names matching their C-side declarations. */
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
225
/* Collect all ELF note sections.  The trailing ":note" assigns the
 * output section to a program header named "note", which the linker
 * script using this macro must declare in its PHDRS. */
#define NOTES								\
	.notes : { *(.note.*) } :note
228
/* Ordered list of initcall sections: levels 0 through 7, each level's
 * plain entries placed before its "s" entries, with rootfs initcalls
 * slotted between levels 5 and 6.  Link order here determines the
 * order the initcalls are placed in the table. */
#define INITCALLS							\
  	*(.initcall0.init)						\
  	*(.initcall0s.init)						\
  	*(.initcall1.init)						\
  	*(.initcall1s.init)						\
  	*(.initcall2.init)						\
  	*(.initcall2s.init)						\
  	*(.initcall3.init)						\
  	*(.initcall3s.init)						\
  	*(.initcall4.init)						\
  	*(.initcall4s.init)						\
  	*(.initcall5.init)						\
  	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
  	*(.initcall6.init)						\
  	*(.initcall6s.init)						\
  	*(.initcall7.init)						\
  	*(.initcall7s.init)
247