/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol's value
 * should change when the kernel is relocated, make the symbol section
 * relative and put it inside the section definition.
 */
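
/*
 * For example: phys_startup_32/phys_startup_64 below are wrapped in
 * ABSOLUTE() because their values must survive relocation, while symbols
 * such as _text and _etext are defined inside .text and are therefore
 * section relative.
 */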

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
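
/*
 * The assignment above aliases jiffies and jiffies_64 to the same
 * address: on little-endian x86 a 32-bit load of jiffies reads the low
 * word of the 64-bit tick counter, so both names refer to one variable.
 */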

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages after
 * it (which are freed), so the identity mappings get broken down into
 * smaller pages. Since kernel text and kernel identity mappings are
 * separate on 64-bit, we can enable protection checks and still retain
 * 2MB large page mappings for kernel text.
 */
#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_RODATA_END					\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
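
/*
 * The FLAGS() values above are the ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4; hence 5 = R_E, 6 = RW_, 7 = RWE and 0 = ___, matching the
 * comments on each program header.
 */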

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)

#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		__indirect_thunk_end = .;
#endif

		/* End of text section */
		_etext = .;
	} :text = 0x9090
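
	/*
	 * The "= 0x9090" fill above pads gaps in the text segment with
	 * 0x90 (the x86 NOP opcode) rather than zeros, so stray padding
	 * executes harmlessly if ever reached.
	 */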

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X64_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data


	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset) 			\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros.  Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data
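
	/*
	 * Each EMIT_VVAR() above seeks the location counter to a fixed
	 * offset within the page before emitting its variable, so every
	 * vvar lands at the offset asm/vvar.h promises to user space.
	 */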

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which at runtime can be
	 * patched with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
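
	/*
	 * Each entry above is a struct paravirt_patch_site (see
	 * paravirt_types.h): roughly a pointer to the original
	 * instructions plus the patch type, length and clobber mask.
	 */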

	/*
	 * struct alt_inst entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
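
	/*
	 * Each entry above is a struct alt_instr (see alternative.h):
	 * roughly two s32 offsets locating the original and replacement
	 * instructions, the CPU feature bit, and the instruction lengths.
	 */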

	/*
	 * And here are the replacement instructions. The linker places them
	 * as plain binary blobs; .altinstructions carries enough data (the
	 * address and length of each) to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which is sorted at run time according to
	 * dependency order. After rootfs_initcall is complete this section
	 * can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
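
	/*
	 * Each .smp_locks entry is an offset locating a LOCK prefix byte;
	 * on a uniprocessor boot the alternatives code uses this list to
	 * patch the prefixes out, after which the section may be freed.
	 */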

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
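
	/*
	 * Early boot allocations made via extend_brk() (and reservations
	 * made with RESERVE_BRK()) are carved out of the
	 * [__brk_base, __brk_limit) range laid out above.
	 */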

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
		*(__func_stack_frame_non_standard)
	}
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
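
/*
 * For example, INIT_PER_CPU(gdt_page) expands to
 *   init_per_cpu__gdt_page = gdt_page + __per_cpu_load;
 * giving the boot CPU a linked address for the symbol before the
 * per-cpu areas are set up.
 */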

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif