/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include "image.h"

/* .exit.text needed in case of alternative patching */
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)

OUTPUT_ARCH(aarch64)
ENTRY(_text)

jiffies = jiffies_64;

/*
 * Groups the KVM hypervisor text sections and brackets them with
 * start/end symbols; expanded inside the .text output section below.
 */
#define HYPERVISOR_TEXT					\
	/*						\
	 * Align to 4 KB so that			\
	 * a) the HYP vector table is at its minimum	\
	 *    alignment of 2048 bytes			\
	 * b) the HYP init code will not cross a page	\
	 *    boundary if its size does not exceed	\
	 *    4 KB (see related ASSERT() below)		\
	 */						\
	. = ALIGN(SZ_4K);				\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;	\
	*(.hyp.idmap.text)				\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;	\
	VMLINUX_SYMBOL(__hyp_text_start) = .;		\
	*(.hyp.text)					\
	VMLINUX_SYMBOL(__hyp_text_end) = .;

/* Identity-mapped text; size/alignment checked by the ASSERT() below. */
#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	VMLINUX_SYMBOL(__idmap_text_start) = .;		\
	*(.idmap.text)					\
	VMLINUX_SYMBOL(__idmap_text_end) = .;

/* Hibernation exit text; empty unless CONFIG_HIBERNATION is enabled. */
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT					\
	. = ALIGN(SZ_4K);				\
	VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
	*(.hibernate_exit.text)				\
	VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
#else
#define HIBERNATE_TEXT
#endif

/*
 * EL0 entry trampoline text; padded to whole pages on both sides.
 * The ASSERT() below pins its size to exactly PAGE_SIZE.
 */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
	KEEP(*(.entry.tramp.text))			\
	. = ALIGN(PAGE_SIZE);				\
	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
#else
#define TRAMP_TEXT
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif

#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN			SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN			SZ_64K
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 */
	/DISCARD/ : {
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
		*(.discard)
		*(.discard.*)
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash)
	}

	. = KIMAGE_VADDR + TEXT_OFFSET;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			IDMAP_TEXT
			HIBERNATE_TEXT
			TRAMP_TEXT
			*(.fixup)
			*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	RO_DATA(PAGE_SIZE)		/* everything from this point to     */
	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
	NOTES

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
		*(.init.rodata.* .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		KEEP(*(.altinstructions))
		__alt_instructions_end = .;
	}
	.altinstr_replacement : {
		KEEP(*(.altinstr_replacement))
	}
	.rela : ALIGN(8) {
		*(.rela .rela*)
	}

	/*
	 * Record the location (relative to KIMAGE_VADDR) and size of the
	 * RELA relocations gathered above.
	 */
	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela);

	. = ALIGN(SEGMENT_ALIGN);
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	_edata = .;

	BSS_SECTION(0, 0, 0)

	/* Reserve room for the initial identity-map and swapper page tables. */
	. = ALIGN(PAGE_SIZE);
	idmap_pg_dir = .;
	. += IDMAP_DIR_SIZE;
	swapper_pg_dir = .;
	. += SWAPPER_DIR_SIZE;

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	reserved_ttbr0 = .;
	. += RESERVED_TTBR0_SIZE;
#endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

	_end = .;

	STABS_DEBUG

	HEAD_SYMBOLS
}


/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
	"Entry trampoline text too big")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")