/*
 * Provenance: extracted from a source-browser page; original path:
 * asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/
 *   arch/blackfin/kernel/vmlinux.lds.S
 */
/*
 * Master linker script for the Blackfin (bfin) kernel image.
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
_jiffies = _jiffies_64;

SECTIONS
{
	/* A RAM kernel links at its load address; a ROM (XIP) kernel links
	 * at the flash base and later relocates writable data into RAM.
	 */
#ifdef CONFIG_RAMKERNEL
	. = CONFIG_BOOT_LOAD;
#else
	. = CONFIG_ROM_BASE;
#endif

	/* Neither the text, ro_data or bss section need to be aligned
	 * So pack them back to back
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
		/* Scheduler text lives here only when it is NOT placed in
		 * on-chip L1 instruction memory (see .text_l1 below).
		 */
		SCHED_TEXT
#endif
		LOCK_TEXT
		IRQENTRY_TEXT
		KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
		/* XIP kernel: init/exit text stays in flash with the rest
		 * of .text instead of a freeable init section.
		 */
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
		EXIT_TEXT
#endif
		*(.text.*)
		*(.fixup)

#if !L1_CODE_LENGTH
		/* No L1 instruction SRAM on this part: fold the would-be
		 * L1 code into ordinary .text.
		 */
		*(.l1.text)
#endif
		__etext = .;
	}

	EXCEPTION_TABLE(4)
	NOTES

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)
	__rodata_end = .;

#ifdef CONFIG_ROMKERNEL
	/* XIP: switch the location counter to RAM for writable sections;
	 * keep the (empty) load address chain anchored after .rodata.
	 */
	. = CONFIG_BOOT_LOAD;
	.bss : AT(__rodata_end)
#else
	.bss :
#endif
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

#if defined(CONFIG_ROMKERNEL)
	/* XIP: .data is copied from flash; its load address follows .bss's
	 * slot in the flash image.
	 */
	.data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
	.data :
#endif
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
		/* No L1 data bank A: keep its input sections in plain .data. */
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		/* No on-chip L2 SRAM: keep L2 data in plain .data. */
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

		INIT_TASK_DATA(THREAD_SIZE)

		__edata = .;
	}
	/* Flash-image position/size of .data; start of the LMA chain that
	 * the on-chip (L1/L2) overlay sections below are strung onto.
	 */
	__data_lma = LOADADDR(.data);
	__data_len = SIZEOF(.data);

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

#ifdef CONFIG_RAMKERNEL
	INIT_TEXT_SECTION(PAGE_SIZE)

	/* We have to discard exit text and such at runtime, not link time, to
	 * handle embedded cross-section references (alt instructions, bug
	 * table, eh_frame, etc...).  We need all of our .text up front and
	 * .data after it for PCREL call issues.
	 */
	.exit.text :
	{
		EXIT_TEXT
	}

	. = ALIGN(16);
	INIT_DATA_SECTION(16)
	PERCPU(4)

	.exit.data :
	{
		EXIT_DATA
	}

	/* L1 instruction overlay: VMA is the on-chip L1 code base, while the
	 * LMA sits in the RAM image right after .exit.data (start-up code
	 * presumably copies LMA->VMA; the copy loop is not in this file).
	 */
	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
	/* XIP kernel: init data is copied out of flash, so it carries an
	 * explicit load address after .data; per-cpu areas are laid out
	 * inline here rather than via the PERCPU() helper.
	 */
	.init.data : AT(__data_lma + __data_len)
	{
		__sinitdata = .;
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS

		. = ALIGN(4);
		___per_cpu_load = .;
		___per_cpu_start = .;
		*(.data.percpu.first)
		*(.data.percpu.page_aligned)
		*(.data.percpu)
		*(.data.percpu.shared_aligned)
		___per_cpu_end = .;

		EXIT_DATA
		__einitdata = .;
	}
	__init_data_lma = LOADADDR(.init.data);
	__init_data_len = SIZEOF(.init.data);
	__init_data_end = .;

	.text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
		/* Scheduler hot path placed in L1 instruction SRAM. */
		SCHED_TEXT
#endif
		. = ALIGN(4);
		__etext_l1 = .;
	}
	__text_l1_lma = LOADADDR(.text_l1);
	__text_l1_len = SIZEOF(.text_l1);
	ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")

	/* L1 data bank A overlay; LMA chained after .text_l1's load image. */
	.data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}
	__data_l1_lma = LOADADDR(.data_l1);
	__data_l1_len = SIZEOF(.data_l1);
	ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

	/* L1 data bank B overlay; LMA chained after .data_l1. */
	.data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
	__data_b_l1_lma = LOADADDR(.data_b_l1);
	__data_b_l1_len = SIZEOF(.data_b_l1);
	ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

	/* Combined code+data overlay for on-chip L2 SRAM; LMA chained after
	 * .data_b_l1 so the boot image stays contiguous.
	 */
	.text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	__l2_lma = LOADADDR(.text_data_l2);
	__l2_len = SIZEOF(.text_data_l2);
	ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 * (The overlay LMAs above live inside the freeable init region for
	 * RAM kernels, so ___init_end must cover the end of the LMA chain.)
	 */
#ifdef CONFIG_RAMKERNEL
	. = __l2_lma + __l2_len;
#else
	. = __init_data_end;
#endif
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end =.;

	STABS_DEBUG

	DWARF_DEBUG

	DISCARDS
}