/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

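/*
 * Produce a little-endian Blackfin ELF image entered at __start.
 * Blackfin is little endian, so the 32-bit _jiffies alias can simply
 * point at the low word of the 64-bit _jiffies_64 counter.
 */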
OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
_jiffies = _jiffies_64;

SECTIONS
{
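	/*
	 * RAM kernels link and run at the RAM boot address; ROM kernels
	 * link .text at the flash base and relocate their data into RAM
	 * further below.
	 */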
#ifdef CONFIG_RAMKERNEL
	. = CONFIG_BOOT_LOAD;
#else
	. = CONFIG_ROM_BASE;
#endif

	/* None of the text, ro_data, or bss sections need to be aligned,
	 * so pack them back to back.
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
		SCHED_TEXT
#endif
		LOCK_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
		EXIT_TEXT
#endif
		*(.text.*)
		*(.fixup)

#if !L1_CODE_LENGTH
		*(.l1.text)
#endif
		__etext = .;
	}

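	/* 4-byte-aligned entries for the exception fixup table */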
	EXCEPTION_TABLE(4)
	NOTES

	/* Just in case the first read-only access is a 32-bit one */
	RO_DATA(4)
	__rodata_end = .;

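	/*
	 * For ROM kernels, everything from bss onward lives in RAM: move
	 * the location counter (VMA) up to the RAM base while AT() keeps
	 * the load addresses (LMA) packed right behind the read-only data
	 * in flash.
	 */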
#ifdef CONFIG_ROMKERNEL
	. = CONFIG_BOOT_LOAD;
	.bss : AT(__rodata_end)
#else
	.bss :
#endif
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

#if defined(CONFIG_ROMKERNEL)
	.data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
	.data :
#endif
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

		INIT_TASK_DATA(THREAD_SIZE)

		__edata = .;
	}
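	/*
	 * Record the load address and size of .data so the early boot
	 * code can copy the initialized data out of flash on ROM kernels.
	 */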
	__data_lma = LOADADDR(.data);
	__data_len = SIZEOF(.data);

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned.
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

#ifdef CONFIG_RAMKERNEL
	INIT_TEXT_SECTION(PAGE_SIZE)

	/* We have to discard exit text and such at runtime, not link time, to
	 * handle embedded cross-section references (alt instructions, bug
	 * table, eh_frame, etc...).  We need all of our .text up front and
	 * .data after it for PCREL call issues.
	 */
	.exit.text :
	{
		EXIT_TEXT
	}

	. = ALIGN(16);
	INIT_DATA_SECTION(16)
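	/* per-CPU data, aligned to the 32-byte L1 cache line */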
	PERCPU_SECTION(32)

	.exit.data :
	{
		EXIT_DATA
	}

	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
	.init.data : AT(__data_lma + __data_len + 32)
	{
		__sinitdata = .;
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS

		. = ALIGN(PAGE_SIZE);
		___per_cpu_load = .;
		PERCPU_INPUT(32)

		EXIT_DATA
		__einitdata = .;
	}
	__init_data_lma = LOADADDR(.init.data);
	__init_data_len = SIZEOF(.init.data);
	__init_data_end = .;

	.text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
	{
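		/*
		 * Code destined for on-chip L1 instruction SRAM: it runs
		 * at L1_CODE_START (VMA) but is staged in the kernel image
		 * at the AT() load address and copied into L1 during early
		 * boot.
		 */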
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text.head)
		*(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
		SCHED_TEXT
#endif
		. = ALIGN(4);
		__etext_l1 = .;
	}
	__text_l1_lma = LOADADDR(.text_l1);
	__text_l1_len = SIZEOF(.text_l1);
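	/* Fail the link, rather than the boot, if the on-chip bank overflows */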
	ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")

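	/* Data destined for on-chip L1 data bank A, copied in at boot */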
	.data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}
	__data_l1_lma = LOADADDR(.data_l1);
	__data_l1_len = SIZEOF(.data_l1);
	ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

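	/* Likewise for L1 data bank B */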
	.data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
	__data_b_l1_lma = LOADADDR(.data_b_l1);
	__data_b_l1_len = SIZEOF(.data_b_l1);
	ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

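	/* On-chip L2 SRAM holds both code and data in one region */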
	.text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	__l2_lma = LOADADDR(.text_data_l2);
	__l2_len = SIZEOF(.text_data_l2);
	ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
#ifdef CONFIG_RAMKERNEL
	. = __l2_lma + __l2_len;
#else
	. = __init_data_end;
#endif
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end = .;

	STABS_DEBUG

	DWARF_DEBUG

	DISCARDS
}