/* (removed: code-browser navigation chrome accidentally captured in this copy) */
/*
 * File:         arch/blackfin/kernel/vmlinux.lds.S
 * Based on:     none - original work
 * Author:
 *
 * Created:      Tue Sep 21 2004
 * Description:  Master linker script for blackfin architecture
 *
 * Modified:
 *               Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
29
/* Blackfin C symbols carry a leading underscore at the asm/linker level,
 * so map the generic VMLINUX_SYMBOL() wrapper to a one-underscore prefix
 * before pulling in the generic linker-script helpers.
 */
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
/* Alias the 32-bit jiffies counter onto jiffies_64 (low word on this
 * little-endian target -- NOTE(review): assumed LE; confirm for bfin).
 */
_jiffies = _jiffies_64;
40
SECTIONS
{
	/* Kernel image is linked to run from the configured boot address. */
	. = CONFIG_BOOT_LOAD;
	/* Neither the text, ro_data or bss section need to be aligned
	 * So pack them back to back
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.text.*)
		*(.fixup)

		/* If no on-chip L1 instruction memory is configured, fold the
		 * .l1.text input sections into ordinary .text instead.
		 */
#if !L1_CODE_LENGTH
		*(.l1.text)
#endif

		/* Exception fixup table lives at the end of .text, bracketed
		 * by start/stop symbols for the fault handler to search.
		 */
		. = ALIGN(16);
		___start___ex_table = .;
		*(__ex_table)
		___stop___ex_table = .;

		__etext = .;
	}

	NOTES

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)

	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
		/* L1 bss falls back into ordinary bss when the corresponding
		 * L1 data bank is not configured.
		 */
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

	.data :
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		. = ALIGN(32);
		*(.data.cacheline_aligned)

		/* L1/L2 data falls back into ordinary .data when the
		 * corresponding on-chip memory is not configured.
		 */
#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		. = ALIGN(THREAD_SIZE);
		*(.init_task.data)

		__edata = .;
	}

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	.init.text :
	{
		. = ALIGN(PAGE_SIZE);
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
	}
	.init.data :
	{
		. = ALIGN(16);
		INIT_DATA
	}
	.init.setup :
	{
		. = ALIGN(16);
		___setup_start = .;
		*(.init.setup)
		___setup_end = .;
	}
	.initcall.init :
	{
		___initcall_start = .;
		INITCALLS
		___initcall_end = .;
	}
	.con_initcall.init :
	{
		___con_initcall_start = .;
		*(.con_initcall.init)
		___con_initcall_end = .;
	}
	PERCPU(4)
	SECURITY_INIT
	.init.ramfs :
	{
		. = ALIGN(4);
		___initramfs_start = .;
		*(.init.ramfs)
		. = ALIGN(4);
		___initramfs_end = .;
	}

	/* From here on, sections run (VMA) in on-chip L1/L2 memory but are
	 * loaded (LMA) contiguously after .init.ramfs in external memory;
	 * startup code copies them into place using these *_lma_start marks.
	 */
	__l1_lma_start = .;

	.text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text)
		. = ALIGN(4);
		__etext_l1 = .;
	}

	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}

	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}

	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);

	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end =.;

	STABS_DEBUG

	DWARF_DEBUG

	/* Code and data only needed at module-exit time is never used by a
	 * built-in kernel, so discard it outright.
	 */
	/DISCARD/ :
	{
		EXIT_TEXT
		EXIT_DATA
		*(.exitcall.exit)
	}
}
263