/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif

#define PROC_INFO                                               \
        . = ALIGN(4);                                           \
        VMLINUX_SYMBOL(__proc_info_begin) = .;                  \
        *(.proc.info.init)                                      \
        VMLINUX_SYMBOL(__proc_info_end) = .;

#define IDMAP_TEXT                                              \
        ALIGN_FUNCTION();                                       \
        VMLINUX_SYMBOL(__idmap_text_start) = .;                 \
        *(.idmap.text)                                          \
        VMLINUX_SYMBOL(__idmap_text_end) = .;                   \
        . = ALIGN(PAGE_SIZE);                                   \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;             \
        *(.hyp.idmap.text)                                      \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)         x
#else
#define ARM_CPU_DISCARD(x)      x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
        defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)        x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)     x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name. There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                *(.ARM.exidx.exit.text)
                *(.ARM.extab.exit.text)
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
#ifndef CONFIG_MMU
                *(.text.fixup)
                *(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
#endif
                *(.discard)
                *(.discard.*)
        }

#ifdef CONFIG_XIP_KERNEL
        . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
        . = PAGE_OFFSET + TEXT_OFFSET;
#endif
        .head.text : {
                _text = .;
                HEAD_TEXT
        }

#ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
#endif

        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                IDMAP_TEXT
                __exception_text_start = .;
                *(.exception.text)
                __exception_text_end = .;
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                *(.gnu.warning)
                *(.glue_7)
                *(.glue_7t)
                . = ALIGN(4);
                *(.got)                 /* Global offset table          */
                ARM_CPU_KEEP(PROC_INFO)
        }

#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#endif
        _etext = .;                     /* End of text section */

        RO_DATA(PAGE_SIZE)

        . = ALIGN(4);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
#ifdef CONFIG_MMU
                *(__ex_table)
#endif
                __stop___ex_table = .;
        }

#ifdef CONFIG_ARM_UNWIND
        /*
         * Stack unwinding tables
         */
        . = ALIGN(8);
        .ARM.unwind_idx : {
                __start_unwind_idx = .;
                *(.ARM.exidx*)
                __stop_unwind_idx = .;
        }
        .ARM.unwind_tab : {
                __start_unwind_tab = .;
                *(.ARM.extab*)
                __stop_unwind_tab = .;
        }
#endif

        NOTES

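        /*
         * Everything placed between __init_begin and __init_end is
         * expected to be freed once booting has finished (see
         * free_initmem()).  The stricter alignment below lets the init
         * area begin on a section boundary when CONFIG_ARM_KERNMEM_PERMS
         * is enabled, so its permissions and mapping can be changed with
         * section granularity.
         */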
#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
# else
        . = ALIGN(PAGE_SIZE);
# endif
        __init_begin = .;
#endif
        /*
         * The vectors and stubs are relocatable code, and the
         * only thing that matters is their relative offsets
         */
        __vectors_start = .;
        .vectors 0 : AT(__vectors_start) {
                *(.vectors)
        }
        . = __vectors_start + SIZEOF(.vectors);
        __vectors_end = .;

        __stubs_start = .;
        .stubs 0x1000 : AT(__stubs_start) {
                *(.stubs)
        }
        . = __stubs_start + SIZEOF(.stubs);
        __stubs_end = .;

        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }
#ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
                *(.alt.smp.init)
                __smpalt_end = .;
        }
#endif
        .init.pv_table : {
                __pv_table_begin = .;
                *(.pv_table)
                __pv_table_end = .;
        }
        .init.data : {
#ifndef CONFIG_XIP_KERNEL
                INIT_DATA
#endif
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }
#ifndef CONFIG_XIP_KERNEL
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }
#endif

#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
        . = PAGE_OFFSET + TEXT_OFFSET;
#else
#ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(THREAD_SIZE);
#endif
        __init_end = .;
        __data_loc = .;
#endif

        .data : AT(__data_loc) {
                _data = .;              /* address in memory */
                _sdata = .;

                /*
                 * first, the init task union, aligned
                 * to an 8192 byte boundary.
                 */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
                . = ALIGN(PAGE_SIZE);
                __init_begin = .;
                INIT_DATA
                ARM_EXIT_KEEP(EXIT_DATA)
                . = ALIGN(PAGE_SIZE);
                __init_end = .;
#endif

                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
                READ_MOSTLY_DATA(L1_CACHE_BYTES)

                /*
                 * and the usual data section
                 */
                DATA_DATA
                CONSTRUCTORS

                _edata = .;
        }
        _edata_loc = __data_loc + SIZEOF(.data);
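
        /*
         * For an XIP kernel, __data_loc is where .data sits in the ROM
         * image while _sdata.._edata is its run-time location in RAM;
         * early boot code is expected to copy the region across before
         * any C code runs.  _edata_loc marks the end of that load image.
         */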

#ifdef CONFIG_HAVE_TCM
        /*
         * We align everything to a page boundary so we can
         * free it after init has commenced and the TCM contents have
         * been copied to their destination.
         */
        .tcm_start : {
                . = ALIGN(PAGE_SIZE);
                __tcm_start = .;
                __itcm_start = .;
        }

        /*
         * Link these to the ITCM RAM.
         * Put the VMA at the TCM address and the LMA in common RAM;
         * we'll upload the contents from RAM to TCM and free
         * the used RAM after that.
         */
        .text_itcm ITCM_OFFSET : AT(__itcm_start)
        {
                __sitcm_text = .;
                *(.tcm.text)
                *(.tcm.rodata)
                . = ALIGN(4);
                __eitcm_text = .;
        }

        /*
         * Reset the dot pointer; this is needed to create the
         * relative __dtcm_start below (to be used as extern in code).
         */
        . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

        .dtcm_start : {
                __dtcm_start = .;
        }

        /* TODO: add the remainder of ITCM as well, that can be used for data! */
        .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
        {
                . = ALIGN(4);
                __sdtcm_data = .;
                *(.tcm.data)
                . = ALIGN(4);
                __edtcm_data = .;
        }

        /* Reset the dot pointer or the linker gets confused */
        . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

        /* End marker for freeing the TCM copy in the linked object */
        .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
                . = ALIGN(PAGE_SIZE);
                __tcm_end = .;
        }
#endif

        BSS_SECTION(0, 0, 0)
        _end = .;

        STABS_DEBUG
}

/*
 * These must never be empty.
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
        "HYP init code too big or misaligned")
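
/*
 * Masking __hyp_idmap_text_start with PAGE_MASK rounds it down to its
 * page base, so the assertion above fails both when the HYP idmap text
 * grows beyond a page and when it straddles a page boundary.
 */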