#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif
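/*
 * PROVIDE32() defines the legacy 32-bit symbols (etext, edata, end) only
 * on 32-bit builds; on 64-bit it expands to PROVIDE(__unused__<sym>), so
 * the bare name is never emitted.  For example, "PROVIDE32 (etext = .)"
 * below becomes PROVIDE(etext = .) on ppc32 and PROVIDE(__unused__etext = .)
 * on ppc64.
 */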
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

ENTRY(_stext)

PHDRS {
	kernel PT_LOAD FLAGS(7); /* RWX */
	notes PT_NOTE FLAGS(0);
	dummy PT_NOTE FLAGS(0);

	/* binutils < 2.18 has a bug that makes it misbehave when taking an
	   ELF file with all segments at load address 0 as input.  This
	   happens when running "strip" on vmlinux, because of the AT() magic
	   in this linker script.  People using GCC >= 4.2 won't run into
	   this problem, because the "build-id" support will put some data
	   into the "notes" segment (at a non-zero load address).

	   To work around this, we force some data into both the "dummy"
	   segment and the kernel segment, so the dummy segment will get a
	   non-zero load address.  It's not enough to always create the
	   "notes" segment, since if nothing gets assigned to it, its load
	   address will be zero.  */
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
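/*
 * jiffies overlays the 64-bit jiffies_64 counter.  On 64-bit it aliases
 * the whole variable; on 32-bit (big-endian) the "+ 4" makes it alias the
 * low-order 32-bit word of jiffies_64.
 */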
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
# define END_FIXED	0x100
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
#  define END_FIXED	0x8000
# else
#  define END_FIXED	0x7000
# endif
#endif
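		/*
		 * END_FIXED is the first address past the fixed (absolutely
		 * placed) head region: 0x100 on Book3E, and 0x7000 or 0x8000
		 * on Book3S depending on whether the fwnmi data page is
		 * included.
		 */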
		ASSERT((. == END_FIXED), "vmlinux.lds.S: fixed section overflow error");
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :kernel

	/*
	 * If the build dies here, it's likely code in head_64.S is referencing
	 * labels it can't reach, and the linker inserting stubs without the
	 * assembler's knowledge. To debug, remove the above assert and
	 * rebuild. Look for branch stubs in the fixed section region.
	 *
	 * Linker stub generation could be allowed in "trampoline"
	 * sections if absolutely necessary, but this would require
	 * some rework of the fixed sections. Before resorting to this,
	 * consider references that have sufficient addressing range
	 * (e.g., hand-coded trampolines) so the linker does not have
	 * to add stubs.
	 *
	 * Linker stubs at the top of the main text section are currently not
	 * detected, and will result in a crash at boot due to offsets being
	 * wrong.
	 */
#ifdef CONFIG_PPC64
	/*
	 * BLOCK(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text .fixup __ftr_alt_* .ref.text)
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

	} :kernel

	. = ALIGN(PAGE_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RODATA

#ifdef CONFIG_PPC64
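	/*
	 * Table of RFI exit sites.  At boot these locations are patched by
	 * the fixup code in arch/powerpc/lib/feature-fixups.c to perform the
	 * L1-D cache flush required for the RFI flush (Meltdown) mitigation,
	 * or left as no-ops when no flush is needed.
	 */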
	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif

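	/*
	 * The exception table: entries pairing a potentially faulting
	 * instruction with its fixup code, used by the fault handlers to
	 * recover from faults in kernel accesses to user memory.  The
	 * argument is the required alignment.
	 */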
	EXCEPTION_TABLE(0)

	NOTES :kernel :notes

	/* The dummy segment contents for the bug workaround mentioned above
	   near PHDRS.  */
	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
		LONG(0)
		LONG(0)
		LONG(0)
	} :kernel :dummy

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE) :kernel

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
		__vtop_table_begin = .;
		*(.vtop_fixup);
		__vtop_table_end = .;
		__ptov_table_begin = .;
		*(.ptov_fixup);
		__ptov_table_end = .;
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	SECURITY_INIT

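	/*
	 * CPU, MMU, lwsync and (on 64-bit) firmware feature fixup tables.
	 * Each records alternative code ranges that do_feature_fixups() and
	 * friends in arch/powerpc/lib/feature-fixups.c use to patch the
	 * kernel at boot for the features actually detected.
	 */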
	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		*(__ftr_fixup)
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		*(__mmu_ftr_fixup)
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		*(__lwsync_fixup)
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		*(__fw_ftr_fixup)
		__stop___fw_ftr_fixup = .;
	}
#endif
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

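	/*
	 * Per-CPU data template, cacheline aligned; one copy is made for
	 * each CPU at boot and reached through that CPU's per-CPU offset.
	 */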
	PERCPU_SECTION(L1_CACHE_BYTES)

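	/*
	 * Machine descriptions registered with define_machine(); at boot,
	 * probe_machine() walks this table to find the platform matching
	 * the device tree.
	 */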
	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		*(.machine.desc)
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
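	/*
	 * For relocatable kernels, keep the dynamic linking sections so the
	 * early boot code (arch/powerpc/kernel/reloc_32.S / reloc_64.S) can
	 * apply the relocations for the address the kernel was actually
	 * loaded at.
	 */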
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.sdata)
		*(.got.plt) *(.got)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

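	/*
	 * On the ELFv1 ABI, .opd holds function descriptors (entry address
	 * plus TOC pointer); function symbols resolve to the descriptor
	 * rather than to the code itself.
	 */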
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

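	/*
	 * The kernel TOC.  __toc_start marks its base; the runtime TOC
	 * pointer (r2) is biased to __toc_start + 0x8000 so the whole 64K
	 * TOC is reachable with 16-bit signed offsets.
	 */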
	. = ALIGN(256);
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_SIZE)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

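	/* Data that is not saved/restored across hibernation (suspend to disk). */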
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	. = ALIGN(PAGE_SIZE);
	_edata  =  .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	/* Sections to be discarded. */
	DISCARDS
}