/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function
 * alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
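/*
 * For example (a sketch of the token pasting above): with
 * CONFIG_MEMORY_HOTPLUG=y, MEM_KEEP(init.data) expands to the input
 * section pattern *(.meminit.data) and MEM_DISCARD(init.data) expands
 * to nothing, so .meminit.data contents stay in the image; without the
 * option the roles swap and the section falls through to DISCARDS.
 */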
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	\
				*(_ftrace_branch)				\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* trace_printk() format pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8);				\
			   VMLINUX_SYMBOL(__clksrc_of_table) = .;	\
			   *(__clksrc_of_table)				\
			   *(__clksrc_of_table_end)
#else
#define CLKSRC_OF_TABLES()
#endif

#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE()				\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__irqchip_begin) = .;			\
	*(__irqchip_of_table)					\
	*(__irqchip_of_end)
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif

#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__clk_of_table) = .;	\
			*(__clk_of_table)			\
			*(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif

#define KERNEL_DTB()						\
	STRUCT_ALIGN();						\
	VMLINUX_SYMBOL(__dtb_start) = .;			\
	*(.dtb.init.rodata)					\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA						\
	*(.data)						\
	*(.ref.data)						\
	*(.data..shared_aligned) /* percpu related */		\
	DEV_KEEP(init.data)					\
	DEV_KEEP(exit.data)					\
	CPU_KEEP(init.data)					\
	CPU_KEEP(exit.data)					\
	MEM_KEEP(init.data)					\
	MEM_KEEP(exit.data)					\
	*(.data.unlikely)					\
	STRUCT_ALIGN();						\
	*(__tracepoints)					\
	/* implement dynamic printk debug */			\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___jump_table) = .;		\
	*(__jump_table)						\
	VMLINUX_SYMBOL(__stop___jump_table) = .;		\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___verbose) = .;			\
	*(__verbose)						\
	VMLINUX_SYMBOL(__stop___verbose) = .;			\
	LIKELY_PROFILE()					\
	BRANCH_PROFILE()					\
	TRACE_PRINTKS()

/*
 * Data section helpers
 */
#define NOSAVE_DATA						\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_begin) = .;			\
	*(.data..nosave)					\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)				\
	. = ALIGN(page_align);					\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..read_mostly)					\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)				\
	. = ALIGN(align);					\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..init_task)
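/*
 * A minimal sketch of using the helpers above directly, assuming an
 * architecture that rolls its own .data output section instead of
 * using RW_DATA_SECTION() (defined further down); the 64 here stands
 * in for an arch cacheline size:
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		CACHELINE_ALIGNED_DATA(64)
 *		READ_MOSTLY_DATA(64)
 *		DATA_DATA
 *	}
 */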
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)					\
	. = ALIGN((align));					\
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;		\
		*(.rodata) *(.rodata.*)				\
		*(__vermagic)	/* Kernel version magic */	\
		. = ALIGN(8);					\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;	\
		*(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;	\
		*(__tracepoints_strings) /* Tracepoints: strings */ \
	}							\
								\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)					\
	}							\
								\
	BUG_TABLE						\
								\
	/* PCI quirks */					\
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		*(.pci_fixup_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		*(.pci_fixup_header)				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		*(.pci_fixup_final)				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		*(.pci_fixup_enable)				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		*(.pci_fixup_resume)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early)			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		*(.pci_fixup_suspend)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
	}							\
								\
	/* Built-in firmware blobs */				\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;		\
		*(.builtin_fw)					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;		\
	}							\
								\
	/* RapidIO route ops */					\
	.rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;	\
		*(.rio_switch_ops)				\
		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;	\
	}							\
								\
	TRACEDATA						\
								\
	/* Kernel symbol table: Normal symbols */		\
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;		\
		*(SORT(___ksymtab+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		*(SORT(___ksymtab_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		*(SORT(___ksymtab_unused+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(SORT(___ksymtab_unused_gpl+*))		\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(SORT(___ksymtab_gpl_future+*))		\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: Normal symbols */		\
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;		\
		*(SORT(___kcrctab+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		*(SORT(___kcrctab_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		*(SORT(___kcrctab_unused+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(SORT(___kcrctab_unused_gpl+*))		\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(SORT(___kcrctab_gpl_future+*))		\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: strings */			\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)				\
	}							\
								\
	/* __*init sections */					\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
		*(.ref.rodata)					\
		DEV_KEEP(init.rodata)				\
		DEV_KEEP(exit.rodata)				\
		CPU_KEEP(init.rodata)				\
		CPU_KEEP(exit.rodata)				\
		MEM_KEEP(init.rodata)				\
		MEM_KEEP(exit.rodata)				\
	}							\
								\
	/* Built-in module parameters. */			\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___param) = .;		\
		*(__param)					\
		VMLINUX_SYMBOL(__stop___param) = .;		\
	}							\
								\
	/* Built-in module versions. */				\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___modver) = .;		\
		*(__modver)					\
		VMLINUX_SYMBOL(__stop___modver) = .;		\
		. = ALIGN((align));				\
		VMLINUX_SYMBOL(__end_rodata) = .;		\
	}							\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)
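/*
 * Note on the SORT(___ksymtab+*) patterns above: each EXPORT_SYMBOL()
 * entry is emitted into its own per-symbol input section (the section
 * name ends with the exported symbol's name), so SORT() makes the
 * linker lay the tables out sorted by symbol name, which allows the
 * module loader to binary-search them.
 */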
#define SECURITY_INIT						\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT						\
		ALIGN_FUNCTION();				\
		*(.text.hot)					\
		*(.text)					\
		*(.ref.text)					\
	DEV_KEEP(init.text)					\
	DEV_KEEP(exit.text)					\
	CPU_KEEP(init.text)					\
	CPU_KEEP(exit.text)					\
	MEM_KEEP(init.text)					\
	MEM_KEEP(exit.text)					\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we get the
 * same address even at the second ld pass when generating System.map */
#define SCHED_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__sched_text_start) = .;		\
		*(.sched.text)					\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the
 * same address even at the second ld pass when generating System.map */
#define LOCK_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__lock_text_start) = .;		\
		*(.spinlock.text)				\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
		*(.kprobes.text)				\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__entry_text_start) = .;		\
		*(.entry.text)					\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
		*(.irqentry.text)				\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION					\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT					\
	}
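/*
 * A sketch of how the *_TEXT helpers above are typically combined in
 * an architecture's vmlinux.lds.S (the ordering and the exact set of
 * helpers vary per arch):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		ENTRY_TEXT
 *		IRQENTRY_TEXT
 *	}
 */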
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)					\
	. = ALIGN(align);					\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ex_table) = .;		\
		*(__ex_table)					\
		VMLINUX_SYMBOL(__stop___ex_table) = .;		\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)				\
	. = ALIGN(align);					\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align)				\
	}
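/*
 * Typical use in an arch script (a sketch; alignment values are
 * arch-specific, e.g. x86 passes 16 to EXCEPTION_TABLE, and
 * INIT_TASK_DATA_SECTION is usually given THREAD_SIZE):
 *
 *	EXCEPTION_TABLE(16)
 *	INIT_TASK_DATA_SECTION(THREAD_SIZE)
 */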
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__ctors_start) = .;	\
			*(.ctors)				\
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA						\
	*(.init.data)						\
	DEV_DISCARD(init.data)					\
	CPU_DISCARD(init.data)					\
	MEM_DISCARD(init.data)					\
	KERNEL_CTORS()						\
	MCOUNT_REC()						\
	*(.init.rodata)						\
	FTRACE_EVENTS()						\
	TRACE_SYSCALLS()					\
	DEV_DISCARD(init.rodata)				\
	CPU_DISCARD(init.rodata)				\
	MEM_DISCARD(init.rodata)				\
	CLK_OF_TABLES()						\
	CLKSRC_OF_TABLES()					\
	KERNEL_DTB()						\
	IRQCHIP_OF_MATCH_TABLE()

#define INIT_TEXT						\
	*(.init.text)						\
	DEV_DISCARD(init.text)					\
	CPU_DISCARD(init.text)					\
	MEM_DISCARD(init.text)

#define EXIT_DATA						\
	*(.exit.data)						\
	DEV_DISCARD(exit.data)					\
	DEV_DISCARD(exit.rodata)				\
	CPU_DISCARD(exit.data)					\
	CPU_DISCARD(exit.rodata)				\
	MEM_DISCARD(exit.data)					\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT						\
	*(.exit.text)						\
	DEV_DISCARD(exit.text)					\
	CPU_DISCARD(exit.text)					\
	MEM_DISCARD(exit.text)

#define EXIT_CALL						\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)					\
	. = ALIGN(sbss_align);					\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {			\
		*(.sbss)					\
		*(.scommon)					\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)						\
	. = ALIGN(bss_align);					\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {			\
		BSS_FIRST_SECTIONS				\
		*(.bss..page_aligned)				\
		*(.dynbss)					\
		*(.bss)						\
		*(COMMON)					\
	}
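/*
 * A sketch of a BSS_FIRST_SECTIONS override, assuming a hypothetical
 * arch input section .bss..early that must land at the very start of
 * .bss; the arch defines this before including this header:
 *
 *	#define BSS_FIRST_SECTIONS *(.bss..early)
 */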
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG						\
		/* DWARF 1 */					\
		.debug          0 : { *(.debug) }		\
		.line           0 : { *(.line) }		\
		/* GNU DWARF 1 extensions */			\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }	\
		.debug_sfnames  0 : { *(.debug_sfnames) }	\
		/* DWARF 1.1 and DWARF 2 */			\
		.debug_aranges  0 : { *(.debug_aranges) }	\
		.debug_pubnames 0 : { *(.debug_pubnames) }	\
		/* DWARF 2 */					\
		.debug_info     0 : { *(.debug_info		\
				.gnu.linkonce.wi.*) }		\
		.debug_abbrev   0 : { *(.debug_abbrev) }	\
		.debug_line     0 : { *(.debug_line) }		\
		.debug_frame    0 : { *(.debug_frame) }		\
		.debug_str      0 : { *(.debug_str) }		\
		.debug_loc      0 : { *(.debug_loc) }		\
		.debug_macinfo  0 : { *(.debug_macinfo) }	\
		/* SGI/MIPS DWARF 2 extensions */		\
		.debug_weaknames 0 : { *(.debug_weaknames) }	\
		.debug_funcnames 0 : { *(.debug_funcnames) }	\
		.debug_typenames 0 : { *(.debug_typenames) }	\
		.debug_varnames  0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG						\
		.stab 0 : { *(.stab) }				\
		.stabstr 0 : { *(.stabstr) }			\
		.stab.excl 0 : { *(.stab.excl) }		\
		.stab.exclstr 0 : { *(.stab.exclstr) }		\
		.stab.index 0 : { *(.stab.index) }		\
		.stab.indexstr 0 : { *(.stab.indexstr) }	\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE						\
	. = ALIGN(8);						\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		*(__bug_table)					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;		\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA						\
	. = ALIGN(4);						\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;		\
		*(.tracedata)					\
		VMLINUX_SYMBOL(__tracedata_end) = .;		\
	}
#else
#define TRACEDATA
#endif

#define NOTES							\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_notes) = .;		\
		*(.note.*)					\
		VMLINUX_SYMBOL(__stop_notes) = .;		\
	}

#define INIT_SETUP(initsetup_align)				\
		. = ALIGN(initsetup_align);			\
		VMLINUX_SYMBOL(__setup_start) = .;		\
		*(.init.setup)					\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)					\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
		*(.initcall##level##.init)			\
		*(.initcall##level##s.init)

#define INIT_CALLS						\
		VMLINUX_SYMBOL(__initcall_start) = .;		\
		*(.initcallearly.init)				\
		INIT_CALLS_LEVEL(0)				\
		INIT_CALLS_LEVEL(1)				\
		INIT_CALLS_LEVEL(2)				\
		INIT_CALLS_LEVEL(3)				\
		INIT_CALLS_LEVEL(4)				\
		INIT_CALLS_LEVEL(5)				\
		INIT_CALLS_LEVEL(rootfs)			\
		INIT_CALLS_LEVEL(6)				\
		INIT_CALLS_LEVEL(7)				\
		VMLINUX_SYMBOL(__initcall_end) = .;
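/*
 * For example (a sketch of what feeds the pasting above): in
 * <linux/init.h>, device_initcall(fn) places fn's entry in section
 * .initcall6.init, so it is collected by INIT_CALLS_LEVEL(6) and runs
 * after all lower levels; the "rootfs" level between 5 and 6 lets
 * rootfs_initcall() populate the root filesystem before device
 * initcalls run.
 */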
#define CON_INITCALL						\
		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
		*(.con_initcall.init)				\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL					\
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS						\
	. = ALIGN(4);						\
	VMLINUX_SYMBOL(__initramfs_start) = .;			\
	*(.init.ramfs)						\
	. = ALIGN(8);						\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS						\
	/DISCARD/ : {						\
	EXIT_TEXT						\
	EXIT_DATA						\
	EXIT_CALL						\
	*(.discard)						\
	*(.discard.*)						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)					\
	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
	*(.data..percpu..first)					\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..page_aligned)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu..readmostly)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu)					\
	*(.data..percpu..shared_aligned)			\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)			\
	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {		\
		PERCPU_INPUT(cacheline)				\
	} phdr							\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Align to PAGE_SIZE and output the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu which is required for relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline)				\
	. = ALIGN(PAGE_SIZE);					\
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;		\
		PERCPU_INPUT(cacheline)				\
	}
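/*
 * A sketch of the two variants in use: most architectures simply
 * place PERCPU_SECTION(L1_CACHE_BYTES) in their init area, while
 * x86_64 uses the explicit-address form, roughly:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * which starts percpu symbols at vaddr 0 and routes the section to a
 * dedicated PHDR.
 */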
/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);					\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {			\
		INIT_TASK_DATA(inittask)			\
		NOSAVE_DATA					\
		PAGE_ALIGNED_DATA(pagealigned)			\
		CACHELINE_ALIGNED_DATA(cacheline)		\
		READ_MOSTLY_DATA(cacheline)			\
		DATA_DATA					\
		CONSTRUCTORS					\
	}

#define INIT_TEXT_SECTION(inittext_align)			\
	. = ALIGN(inittext_align);				\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(_sinittext) = .;			\
		INIT_TEXT					\
		VMLINUX_SYMBOL(_einittext) = .;			\
	}

#define INIT_DATA_SECTION(initsetup_align)			\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA					\
		INIT_SETUP(initsetup_align)			\
		INIT_CALLS					\
		CON_INITCALL					\
		SECURITY_INITCALL				\
		INIT_RAM_FS					\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)		\
	. = ALIGN(sbss_align);					\
	VMLINUX_SYMBOL(__bss_start) = .;			\
	SBSS(sbss_align)					\
	BSS(bss_align)						\
	. = ALIGN(stop_align);					\
	VMLINUX_SYMBOL(__bss_stop) = .;