/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 */
/*
 * LOAD_OFFSET is the difference between a section's virtual address and
 * its physical load address (used in the AT() expressions below).
 * Architectures that load the kernel at an offset override it; the
 * default is no offset.
 */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/*
 * VMLINUX_SYMBOL() lets an architecture decorate linker-visible symbol
 * names (e.g. prepend an underscore); the default is the identity
 * mapping.
 */
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary - equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */

/* .dev##sec input sections: kept when device hotplug is configured in,
 * otherwise routed to the discard list. */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

/* .cpu##sec input sections: kept when CPUs can be hot-plugged. */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

/* .mem##sec input sections: kept when memory hotplug is configured in. */
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

/*
 * ftrace mcount record table: collect all __mcount_loc entries and
 * bracket them with __start_mcount_loc/__stop_mcount_loc so the
 * tracer can iterate over them at boot.
 */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

/*
 * Annotated-branch (likely/unlikely) profiling records, bracketed for
 * iteration by the branch profiler.
 */
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

/*
 * Profiling records for every conditional branch (not just annotated
 * ones), bracketed for iteration by the branch profiler.
 */
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

/* .data section: normal data plus the init/exit data that must be kept
 * (see DEV/CPU/MEM_KEEP above), marker and tracepoint tables, and the
 * branch-profiling records. */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)						\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;				\
	. = ALIGN(32);							\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()

/*
 * Read-only data sections: .rodata proper, PCI fixup tables, built-in
 * firmware, RapidIO route ops, the kernel symbol/CRC tables and module
 * parameters.  Everything between __start_rodata and __end_rodata may
 * be mapped read-only.  'align' is the boundary applied before and
 * after the whole group.
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols (CRCs) */		\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols (CRCs) */		\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols (CRCs) */	\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols (CRCs) */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols (CRCs) */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MCOUNT_REC()						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA RO_DATA(4096)

/* Security-module initcalls, bracketed so they can be run at boot. */
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Mapped to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
		*(.text.init.refok)					\
		*(.exit.text.refok)					\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the
 * same address even at the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

/* kprobes text, bracketed so the kprobes code can refuse to probe itself. */
#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

/* irq-entry text, bracketed for the function-graph tracer. */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

/* init and exit section handling */

/* .init.data plus the init data that may be discarded (see the
 * DEV/CPU/MEM_DISCARD macros above) and the dynamic-printk tables. */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.data)						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.data)						\
	MEM_DISCARD(init.rodata)					\
	/* implement dynamic printk debug */				\
	VMLINUX_SYMBOL(__start___verbose_strings) = .;			\
	*(__verbose_strings)						\
	VMLINUX_SYMBOL(__stop___verbose_strings) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;

/* .init.text plus the discardable init text variants. */
#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

/* .exit.data plus the discardable exit data/rodata variants. */
#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

/* .exit.text plus the discardable exit text variants. */
#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

343 		/* DWARF debug sections.
344 		Symbols in the DWARF debugging sections are relative to
345 		the beginning of the section so we begin them at 0.  */
346 #define DWARF_DEBUG							\
347 		/* DWARF 1 */						\
348 		.debug          0 : { *(.debug) }			\
349 		.line           0 : { *(.line) }			\
350 		/* GNU DWARF 1 extensions */				\
351 		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
352 		.debug_sfnames  0 : { *(.debug_sfnames) }		\
353 		/* DWARF 1.1 and DWARF 2 */				\
354 		.debug_aranges  0 : { *(.debug_aranges) }		\
355 		.debug_pubnames 0 : { *(.debug_pubnames) }		\
356 		/* DWARF 2 */						\
357 		.debug_info     0 : { *(.debug_info			\
358 				.gnu.linkonce.wi.*) }			\
359 		.debug_abbrev   0 : { *(.debug_abbrev) }		\
360 		.debug_line     0 : { *(.debug_line) }			\
361 		.debug_frame    0 : { *(.debug_frame) }			\
362 		.debug_str      0 : { *(.debug_str) }			\
363 		.debug_loc      0 : { *(.debug_loc) }			\
364 		.debug_macinfo  0 : { *(.debug_macinfo) }		\
365 		/* SGI/MIPS DWARF 2 extensions */			\
366 		.debug_weaknames 0 : { *(.debug_weaknames) }		\
367 		.debug_funcnames 0 : { *(.debug_funcnames) }		\
368 		.debug_typenames 0 : { *(.debug_typenames) }		\
369 		.debug_varnames  0 : { *(.debug_varnames) }		\
370 
371 		/* Stabs debugging sections.  */
372 #define STABS_DEBUG							\
373 		.stab 0 : { *(.stab) }					\
374 		.stabstr 0 : { *(.stabstr) }				\
375 		.stab.excl 0 : { *(.stab.excl) }			\
376 		.stab.exclstr 0 : { *(.stab.exclstr) }			\
377 		.stab.index 0 : { *(.stab.index) }			\
378 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
379 		.comment 0 : { *(.comment) }
380 
/* BUG() entries table, bracketed so lib/bug.c can search it. */
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

/* PM trace data, bracketed for the resume-trace machinery. */
#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

/* ELF note sections (e.g. build-id), bracketed for /sys export. */
#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

/* Initcall function pointers, emitted in level order (early, 0..7,
 * with the 's' variants for sync calls and rootfs between 5 and 6). */
#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)

/* Per-CPU data area, bracketed by __per_cpu_start/__per_cpu_end;
 * 'align' is the boundary applied before the area (page-aligned
 * members come first inside it). */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
	}								\
	VMLINUX_SYMBOL(__per_cpu_end) = .;
