#include <asm.h>
#include <arch/arm64/mmu.h>
#include <arch/asm_macros.h>
#include <kernel/vm.h>

/*
 * Register use:
 *  x0-x3   Arguments
 *  x9-x15  Scratch
 *  x18     Shadow stack pointer (if enabled)
 *  x19-x28 Globals
 */
tmp                     .req x9
tmp2                    .req x10
wtmp2                   .req w10
index                   .req x11
canary                  .req x12

ssp                     .req x18
cpuid                   .req x19
page_table0             .req x20
page_table1             .req x21
size                    .req x22
supports_mte            .req x23

.section .text.boot
.globl arm_reset
arm_reset:
.globl _start
.type _start,STT_OBJECT
_start:
    /* This instruction is read by the bootloader to determine image type */
    bl      arm64_elX_to_el1

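    /*
     * Note: adrl here is presumably a macro from arch/asm_macros.h (it is not
     * a native A64 mnemonic) that expands to an adrp/add pair, giving
     * PC-relative addressing so this code runs correctly at whatever physical
     * address it was loaded.
     */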
    /* Initialize VBAR to the temporary exception vector table */
    adrl    tmp, .Learly_exception_base
    msr     vbar_el1, tmp
    isb

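    /*
     * Detect MTE: ID_AA64PFR1_EL1.MTE lives in bits [11:8]. Testing against
     * 0xe00 (bits [11:9]) is only non-zero for field values >= 2, i.e. full
     * allocation-tag support (MTE2+), not the EL0 instruction-only level.
     */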
    mrs     tmp2, id_aa64pfr1_el1
    tst     tmp2, #0xe00
    cset    supports_mte, ne

#if WITH_KERNEL_VM
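    /*
     * SCTLR_EL1 fields touched below: I (bit 12), C (2), SA (3), SA0 (4),
     * ATA (43), ATA0 (42), TCF (41:40), TCF0 (39:38), A (1).
     */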
    /* enable caches so atomics and spinlocks work */
    mrs     tmp, sctlr_el1
    orr     tmp, tmp, #(1<<12) /* Enable icache */
    orr     tmp, tmp, #(1<<2)  /* Enable dcache/ucache */
    orr     tmp, tmp, #(1<<3)  /* Enable Stack Alignment Check EL1 */
    orr     tmp, tmp, #(1<<4)  /* Enable Stack Alignment Check EL0 */
    cbz     supports_mte, .Ldont_set_mte_flags
    orr     tmp, tmp, #(1<<43) /* Allocation Tag Access in EL1 */
    orr     tmp, tmp, #(1<<42) /* Allocation Tag Access in EL0 */
    bic     tmp, tmp, #(1<<40) /* No tag check faults in EL1 */
    orr     tmp, tmp, #(1<<38) /* Tag check faults in EL0 are synchronous */
.Ldont_set_mte_flags:
    bic     tmp, tmp, #(1<<1)  /* Disable Alignment Checking for EL1 EL0 */
    msr     sctlr_el1, tmp

    /* Make sure SP1 is being used */
    msr     spsel, #1

    /* set up the mmu according to mmu_initial_mappings */

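    /*
     * page_table1 (-> ttbr1) is the kernel's high-half translation table;
     * page_table0 (-> ttbr0) is a temporary identity-mapped trampoline used
     * only while the MMU is being turned on.
     */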
    /* load the base of the translation table and clear the table */
    adrl    page_table1, arm64_kernel_translation_table

    /* Prepare tt_trampoline page table */
    /* Calculate pagetable physical addresses */
    adrl    page_table0, tt_trampoline

#if WITH_SMP
    /*
     * Stash x0 as it will be clobbered
     * We place it in size as x0 contains the size passed to the entry point.
     */
    mov     size, x0
    /* Get the CPU number */
    bl      arm64_curr_cpu_num
    mov     cpuid, x0
    /* Restore registers */
    mov     x0, size
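    /*
     * Only the boot CPU (cpuid 0) builds the page tables below; secondary
     * CPUs skip ahead and spin until tt_trampoline_not_ready is cleared.
     */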
    cbnz    cpuid, .Ltt_trampoline_check_secondary
#endif

    /* Zero the top level kernel page table */
    mov     tmp, #0

    /* walk through all the entries in the translation table, clearing them */
.Lclear_top_page_table_loop:
    str     xzr, [page_table1, tmp, lsl #3]
    add     tmp, tmp, #1
    cmp     tmp, #MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP
    bne     .Lclear_top_page_table_loop

    /* Prepare tt_trampoline page table */

    /* Zero tt_trampoline translation tables */
    mov     tmp, #0
.Lclear_tt_trampoline:
    str     xzr, [page_table0, tmp, lsl #3]
    add     tmp, tmp, #1
    cmp     tmp, #MMU_PAGE_TABLE_ENTRIES_IDENT
    blt     .Lclear_tt_trampoline

    /* Setup mapping at phys -> phys */
    /*
     * Map from the start of the kernel to the end of RAM
     * so we have enough pages for boot_alloc memory.
     */
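    /*
     * Each top-level identity-map entry covers a 1 << MMU_IDENT_TOP_SHIFT
     * byte block, so physical addresses are converted to table indices by
     * shifting right by MMU_IDENT_TOP_SHIFT.
     */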
    adr     index, _start
    lsr     tmp, index, #MMU_IDENT_TOP_SHIFT    /* tmp = paddr index */

    /* Check that the start index falls inside the table */
    cmp     tmp, #MMU_PAGE_TABLE_ENTRIES_IDENT
    b.hs    platform_early_halt

#if ARM64_BOOT_PROTOCOL_X0_MEMSIZE
    /*
     * The physical address of the end of RAM (exclusive) is (_start + x0).
     */
    add     index, index, x0
#elif ARM64_BOOT_PROTOCOL_X0_DTB
    /*
     * The physical address of the end of the kernel (exclusive) that can be
     * used before any dynamic memory allocations are made is &_end.
     * (The rest will be mapped in arm64_early_mmu_init)
     */
    adrl    index, _end
#else
    #error Unknown ARM64_BOOT_PROTOCOL
#endif
    /*
     * `index` is the first byte of memory after the kernel that we don't need
     * mapped at this point. We subtract one and round that down to a multiple
     * of 1<<MMU_IDENT_TOP_SHIFT to get the inclusive upper bound of the
     * tt_trampoline range.
     */
    sub     index, index, #1
    lsr     index, index, #MMU_IDENT_TOP_SHIFT

    /* Clamp the end index to the last possible entry */
    cmp     index, #MMU_PAGE_TABLE_ENTRIES_IDENT
    b.lo    .Lset_tt_trampoline_loop
    mov     index, #(MMU_PAGE_TABLE_ENTRIES_IDENT - 1)

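    /*
     * Fill tt_trampoline[tmp..index] with 1:1 block mappings: each entry is
     * the block's physical address (tmp << MMU_IDENT_TOP_SHIFT) plus the
     * identity-map attribute flags, using the tagged variant when full MTE
     * support was detected above.
     */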
.Lset_tt_trampoline_loop:
    cmp     tmp, index
    b.hi    .Lset_tt_trampoline_done

    ldr     tmp2, =MMU_PTE_IDENT_FLAGS
    cbz     supports_mte, .Luse_untagged_mapping
    ldr     tmp2, =MMU_PTE_IDENT_FLAGS_TAGGED
.Luse_untagged_mapping:
    add     tmp2, tmp2, tmp, lsl #MMU_IDENT_TOP_SHIFT  /* tmp2 = pt entry */
    str     tmp2, [page_table0, tmp, lsl #3]     /* tt_trampoline[paddr index] = pt entry */
    add     tmp, tmp, #1
    b       .Lset_tt_trampoline_loop

.Lset_tt_trampoline_done:

#if WITH_SMP
    /* Release the first lock on the secondary CPUs */
    adrl    tmp, tt_trampoline_not_ready
    str     wzr, [tmp]
    b       .Ltt_trampoline_ready

.Ltt_trampoline_check_secondary:
    adrl    tmp, tt_trampoline_not_ready
.Ltt_trampoline_not_ready:
    ldr     wtmp2, [tmp]
    cbnz    wtmp2, .Ltt_trampoline_not_ready
.Ltt_trampoline_ready:
#endif

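    /*
     * MMU enable sequence: invalidate any stale TLB entries, program the
     * memory attributes (MAIR) and translation controls (TCR) for the
     * identity layout, point ttbr0/ttbr1 at the tables built above, then set
     * SCTLR_EL1.M. Execution continues from the identity mapping until the
     * branch through mmu_on_vaddr_ptr further down.
     */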
    /* set up the mmu */

    /* Invalidate TLB */
    tlbi    vmalle1is
    isb
    dsb     sy

    /* Initialize Memory Attribute Indirection Register */
    ldr     tmp, =MMU_MAIR_VAL
    msr     mair_el1, tmp

    /* Initialize TCR_EL1 */
    /* set cacheable attributes on translation walk */
    /* (SMP extensions) non-shareable, inner write-back write-allocate */
    ldr     tmp, =MMU_TCR_FLAGS_IDENT
    msr     tcr_el1, tmp

    isb

    /* Write ttbr with phys addr of the translation table */
    msr     ttbr0_el1, page_table0
    msr     ttbr1_el1, page_table1
    isb

    /* Read SCTLR */
    mrs     tmp, sctlr_el1

    /* Turn on the MMU */
    orr     tmp, tmp, #0x1

    /* Write back SCTLR */
    msr     sctlr_el1, tmp
    isb

#if WITH_SMP
    cbnz    cpuid, .Lpage_tables_check_secondary
#endif
#endif /* WITH_KERNEL_VM */

    /* clear bss */
.L__do_bss:
    /* clear out the bss excluding the stack and kernel translation table */
    /* NOTE: relies on __post_prebss_bss_start and __bss_end being 8 byte aligned */
    adrl    tmp, __post_prebss_bss_start
    adrl    tmp2, __bss_end
    sub     tmp2, tmp2, tmp
    cbz     tmp2, .L__bss_loop_done
.L__bss_loop:
    sub     tmp2, tmp2, #8
    str     xzr, [tmp], #8
    cbnz    tmp2, .L__bss_loop
.L__bss_loop_done:

#if WITH_KERNEL_VM
    /* Set up the stack */
    adrl    tmp, __stack_end
    mov     sp, tmp

    /* Add the stack canary region at the low end of the stack */
    ldr     tmp2, =ARCH_DEFAULT_STACK_SIZE
    sub     tmp, tmp, tmp2
    ldr     index, =ARM64_PHYSICAL_STACK_CANARY_WORDS
    ldr     canary, =ARM64_PHYSICAL_STACK_CANARY

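    /*
     * Fill the bottom ARM64_PHYSICAL_STACK_CANARY_WORDS * 8 bytes of the
     * boot stack with rotating canary values; they are checked after the
     * call into C below to detect overflow of this physical-mode stack.
     */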
.Lphysical_stack_canary_setup_loop:
    cbz     index, .Lphysical_stack_canary_setup_end

    /* Store the canary at the current stack location */
    str     canary, [tmp], #8

    sub     index, index, #1
    /* Rotate the canary so every value is different */
    ror     canary, canary, #ARM64_PHYSICAL_STACK_CANARY_ROTATE
    b       .Lphysical_stack_canary_setup_loop

.Lphysical_stack_canary_setup_end:

#if KERNEL_SCS_ENABLED
    adrl    ssp, __shadow_stack
#endif

    /* Try to write and read tags to test if EL3 configured tag space access */
    cbz     supports_mte, .Lno_tagging_feature

    /*
     * The stack is set up but unused at this point, so we can use
     * it as a convenient address for tagging.
     */
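    /*
     * bfi inserts logical tag 3 into address bits [59:56]; stg then stores
     * that allocation tag to the 16-byte granule at the address, and ldg
     * reads the stored tag back into the register's tag bits. If the value
     * round-trips, tag storage is accessible and arm64_mte_enabled is set.
     */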
    mov     tmp, sp
    sub     tmp, tmp, #16
    mov     tmp2, #3
    bfi     tmp, tmp2, #56, #4
    mov     tmp2, tmp
.arch_extension memtag
    stg     tmp, [tmp]
    ldg     tmp, [tmp]
    cmp     tmp, tmp2
    cset    tmp2, eq
    adrl    tmp, arm64_mte_enabled
    strb    wtmp2, [tmp]

.Lno_tagging_feature:

    /* Save the arguments */
    push    x2, x3
    push    x0, x1

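    /*
     * Call into C with the MMU on and the boot stack live. push/pop are
     * presumably stp/ldp macros (see asm.h) used to preserve the x0-x3
     * arguments across the call. arm64_early_mmu_init is expected to apply
     * the RELR relative relocations delimited by __relr_start/__relr_end
     * (using _start as the load address) and to finish the kernel (ttbr1)
     * mappings.
     */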
    /* x0 already contains ram_size */
    adrl    x1, __relr_start
    adrl    x2, __relr_end
    adrl    x3, _start
    bl      arm64_early_mmu_init

    /* Check the stack canaries */
    adrl    tmp, __stack_end
    ldr     tmp2, =ARCH_DEFAULT_STACK_SIZE
    sub     tmp, tmp, tmp2
    ldr     index, =ARM64_PHYSICAL_STACK_CANARY_WORDS
    ldr     canary, =ARM64_PHYSICAL_STACK_CANARY

.Lphysical_stack_canary_check_loop:
    cbz     index, .Lphysical_stack_canary_check_end

    ldr     tmp2, [tmp], #8
    cmp     tmp2, canary
    b.ne    platform_early_halt /* Error: canary got overwritten, stack overflow */

    sub     index, index, #1
    /* Rotate the canary so every value is different */
    ror     canary, canary, #ARM64_PHYSICAL_STACK_CANARY_ROTATE
    b       .Lphysical_stack_canary_check_loop

.Lphysical_stack_canary_check_end:

    /* Restore the arguments */
    pop     x0, x1
    pop     x2, x3

    /* Check the stack offset */
    adrl    tmp, __stack_end
    mov     tmp2, sp
    cmp     tmp, tmp2
    b.ne    platform_early_halt /* Error: invalid SP on return from C */

#if WITH_SMP
    /* Release the second lock on the secondary CPUs */
    adrl    tmp, page_tables_not_ready
    stlr    wzr, [tmp]
    b       .Lpage_tables_ready

.Lpage_tables_check_secondary:
    adrl    tmp, page_tables_not_ready
.Lpage_tables_not_ready:
    ldar    wtmp2, [tmp]
    cbnz    wtmp2, .Lpage_tables_not_ready
.Lpage_tables_ready:
#endif
    isb

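    /*
     * adrl would still yield the identity-map (physical) address here, so
     * jump through mmu_on_vaddr_ptr instead: it is a relocated pointer in
     * .data holding the final virtual address of .Lmmu_on_vaddr.
     */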
    /* Jump to virtual code address */
    adrl    tmp, mmu_on_vaddr_ptr
    ldr     tmp, [tmp]
    br      tmp

.Lmmu_on_vaddr:
    /* Allow br from above if BTI is enabled, otherwise this is a NOP */
    bti     j

    /* Update VBAR to its virtual address */
    adrl    tmp, .Learly_exception_base
    msr     vbar_el1, tmp
    isb

    /* Disable trampoline page-table in ttbr0 */
    ldr     tmp, =MMU_TCR_FLAGS_KERNEL
    msr     tcr_el1, tmp
    isb

    /* Enable Write implies XN (Execute-Never), EL0/1 */
    mrs     tmp, sctlr_el1
    orr     tmp, tmp, #(1<<19)
    msr     sctlr_el1, tmp
    isb

    /* Invalidate TLB and sync (needed for at least WXN) */
    tlbi    vmalle1
    dsb     sy
    isb

    /* We're no longer using the tagged identity map at this point, so
     * enable synchronous tag check faults in EL1 to catch any code
     * improperly using a tagged mapping
     */
    cbz     supports_mte, .Ldont_set_mte_flags_2
    mrs     tmp, sctlr_el1
    orr     tmp, tmp, #(1<<40)
    msr     sctlr_el1, tmp
.Ldont_set_mte_flags_2:

#if WITH_SMP
    cbnz    cpuid, .Lsecondary_boot
#endif
#endif /* WITH_KERNEL_VM */
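    /*
     * Boot CPU: point SP_EL1 at the sp_el1_bufs area reserved for exception
     * entry, then switch to SP_EL0 and use it for the normal kernel stack.
     */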
    adrl    tmp, sp_el1_bufs
    mov     sp, tmp

    msr     spsel, #0 /* Use SP0 for kernel stacks */
    adrl    tmp, __stack_end
    mov     sp, tmp

#if KERNEL_SCS_ENABLED
    adrl    ssp, __shadow_stack
#endif

    bl  lk_main
    b   .

#if WITH_SMP
/*
 *  Decodes the CPU number out of MPIDR.
 *  This can be overridden by the platform.
 *  If it is, it must:
 *  - Not assume a stack
 *  - Only clobber x0, x9, x10
 *  - Return the CPU number in x0
 *  - If the CPU number would be invalid, return SMP_MAX_CPUS
 */
WEAK_FUNCTION(arm64_curr_cpu_num)
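    /*
     * Default mapping: linear id = Aff0 + (Aff1 << SMP_CPU_CLUSTER_SHIFT),
     * where Aff0 (the CPU number within its cluster) must fit in
     * SMP_CPU_CLUSTER_SHIFT bits; any higher affinity fields are shifted
     * down along with Aff1.
     */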
    mrs     x0, mpidr_el1
    ubfx    x0, x0, #0, #SMP_CPU_ID_BITS
    and     tmp, x0, #0xff
    cmp     tmp, #(1 << SMP_CPU_CLUSTER_SHIFT)
    bge     .Lunsupported_cpu_num
    bic     x0, x0, #0xff
    orr     x0, tmp, x0, LSR #(8 - SMP_CPU_CLUSTER_SHIFT)
    ret

.Lunsupported_cpu_num:
    mov     x0, #SMP_MAX_CPUS
    ret

.Lsecondary_boot:
    cmp     cpuid, #SMP_MAX_CPUS
    bge     .Lunsupported_cpu_trap

    /* Set up the stack pointers */
    adrl    tmp, sp_el1_bufs
    mov     tmp2, #ARM64_EXC_SP_EL1_BUF_SIZE
    mul     tmp2, tmp2, cpuid
    add     sp, tmp, tmp2

    msr     spsel, #0 /* Use SP0 for kernel stacks */

    adrl    tmp, __stack_end
    mov     tmp2, #ARCH_DEFAULT_STACK_SIZE
    mul     tmp2, tmp2, cpuid
    sub     sp, tmp, tmp2

#if KERNEL_SCS_ENABLED
    /* Set up the shadow stack pointer */
    adrl    ssp, __shadow_stack
    mov     tmp, #ARCH_DEFAULT_SHADOW_STACK_SIZE
    mul     tmp, tmp, cpuid
    add     ssp, ssp, tmp
#endif

    mov     x0, cpuid
    bl      arm64_secondary_entry

.Lunsupported_cpu_trap:
    wfe
    b       .Lunsupported_cpu_trap
#endif

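/* .ltorg emits the literal pool for the ldr reg, =constant loads above */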
.ltorg

.section .text.boot.early.vectab
.balign 0x800 /* This is the required alignment for the table */
.Learly_exception_base:
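/*
 * 16 slots of 0x80 bytes each, matching the AArch64 vector table layout
 * (Synchronous/IRQ/FIQ/SError for each of: current EL with SP0, current EL
 * with SPx, lower EL AArch64, lower EL AArch32). Everything traps to
 * platform_early_halt until the real vector table is installed.
 */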
.org 0x000
    b platform_early_halt
.org 0x080
    b platform_early_halt
.org 0x100
    b platform_early_halt
.org 0x180
    b platform_early_halt
.org 0x200
    b platform_early_halt
.org 0x280
    b platform_early_halt
.org 0x300
    b platform_early_halt
.org 0x380
    b platform_early_halt
.org 0x400
    b platform_early_halt
.org 0x480
    b platform_early_halt
.org 0x500
    b platform_early_halt
.org 0x580
    b platform_early_halt
.org 0x600
    b platform_early_halt
.org 0x680
    b platform_early_halt
.org 0x700
    b platform_early_halt
.org 0x780
    b platform_early_halt

#if WITH_KERNEL_VM
.data
DATA(mmu_on_vaddr_ptr)
.hidden mmu_on_vaddr_ptr
    /*
     * Store a pointer to the virtual address of .Lmmu_on_vaddr inside a
     * pointer quad that ASLR can relocate.
     */
    .align 3
    .quad .Lmmu_on_vaddr
#endif

#if WITH_SMP
.data
DATA(tt_trampoline_not_ready)
    /*
     * The primary processor clears this when the ttbr0 page tables
     * are ready and all processors can enable their MMUs. Before
     * passing this semaphore, all CPUs should have MMUs off, and
     * turn them on immediately after.
     */
    .long       1
DATA(page_tables_not_ready)
    /*
     * The primary processor clears this when the final (ttbr1)
     * page tables are ready.
     */
    .long       1
#endif

.section .bss.prebss.stack
    .align 4
DATA(__stack)
    .skip ARCH_DEFAULT_STACK_SIZE * SMP_MAX_CPUS
DATA(__stack_end)

#if KERNEL_SCS_ENABLED
.section .bss.prebss.shadow_stack
    /*
     * Request 2^3 = 8-byte alignment. For aarch64, the stack pointer
     * alignment must be two times the pointer size (2^4) but the same is not
     * required for the shadow stack. Protecting the shadow stack with Memory
     * Tagging Extensions may require matching MTE's 16-byte tag granularity.
     */
    .align 3
DATA(__shadow_stack)
    .skip ARCH_DEFAULT_SHADOW_STACK_SIZE * SMP_MAX_CPUS
#endif

.section .bss.prebss.sp_el1_bufs
    .align 4
DATA(sp_el1_bufs)
    .skip ARM64_EXC_SP_EL1_BUF_SIZE * SMP_MAX_CPUS

#if WITH_KERNEL_VM
.section ".bss.prebss.translation_table"
.align 3 + MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT
DATA(tt_trampoline)
    .skip 8 * MMU_PAGE_TABLE_ENTRIES_IDENT
#endif
