/* LiteOS-A ARMv7-A SMP reset vector / startup code */
/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* ASSEMBLY presumably gates out C-only declarations in the shared headers below — confirm in headers */
#define  ASSEMBLY
#include "arch_config.h"
#include "los_vm_boot.h"
#include "los_vm_zone.h"
#include "los_mmu_descriptor_v6.h"
#undef ASSEMBLY

    /* Stack-region symbols exported to the rest of the kernel */
    .global __exc_stack_top
    .global __svc_stack_top
    .global __exc_stack
    .global __svc_stack

    /* Symbols resolved elsewhere: linker script, C runtime, exception handlers */
    .extern __bss_start
    .extern __bss_end
    .extern hal_clock_initialize_start
    .extern los_bss_init
    .extern _osExceptFiqHdl
    .extern _osExceptAddrAbortHdl
    .extern _osExceptDataAbortHdl
    .extern _osExceptPrefetchAbortHdl
    .extern _osExceptSwiHdl
    .extern _osExceptUndefInstrHdl
    .extern __stack_chk_guard_setup
    .extern g_firstPageTable
    .extern g_mmuJumpPageTable
    .extern g_archMmuInitMapping
    .extern HalSecondaryCpuStart

    /* Mask for the Aff0 (cpu id) field of MPIDR (CP15 c0,c0,5) */
    .equ MPIDR_CPUID_MASK, 0xffU

    /* Assembler configuration: ARMv7-A, A32 state, unified syntax, NEON/VFPv4 */
    .fpu neon-vfpv4
    .syntax unified
    .arch armv7-a
    .arm

/*
 * Set the banked sp for the current CPU.
 * param0 is stack bottom, param1 is stack size, r12 hold cpu id
 * (sp_set computes sp = param0 - cpuid * param1, so param0 is the highest
 *  address of the region, i.e. CPU0's initial sp.)
 * Clobbers r0-r3, r12 and lr (bl).
 */
.macro EXC_SP_SET param0, param1
    ldr    r1, =\param0
    mov    r0, \param1
    bl     sp_set
.endm

/*
 * Plant the stack-overflow magic word for every CPU's stack in a region.
 * param0 is stack top (lowest address of the region — the overflow end of a
 * full-descending stack), param1 is the per-CPU stack size, param2 is magic num.
 * Clobbers r0-r3 and lr (bl excstack_magic).
 */
.macro STACK_MAGIC_SET param0, param1, param2
    ldr     r0, =\param0
    mov     r1, \param1
    ldr     r2, =\param2
    bl      excstack_magic
.endm

    .code   32
    .section ".vectors","ax"

    .global __exception_handlers
__exception_handlers:
    /*
     * Assumption:  ROM code has these vectors at the hardware reset address.
     * A simple jump removes any address-space dependencies [i.e. safer]
     */
    b   reset_vector                            /* 0x00: reset */
    b   _osExceptUndefInstrHdl                  /* 0x04: undefined instruction */
    b   _osExceptSwiHdl                         /* 0x08: svc/swi */
    b   _osExceptPrefetchAbortHdl               /* 0x0c: prefetch abort */
    b   _osExceptDataAbortHdl                   /* 0x10: data abort */
    b   _osExceptAddrAbortHdl                   /* 0x14: reserved vector (address abort) */
    b   OsIrqHandler                            /* 0x18: irq */
    b   _osExceptFiqHdl                         /* 0x1c: fiq */

    /* Startup code which will get the machine into supervisor mode */
    .global reset_vector
    .type   reset_vector,function

#ifdef LOSCFG_BOOTENV_RAM
/* In-image buffer reserved for boot arguments handed over in RAM */
__quickstart_args_start:
    .fill LOSCFG_BOOTENV_RAMSIZE,1,0
__quickstart_args_end:

.global OsGetArgsAddr
.type   OsGetArgsAddr,function

/* Return the address of the boot-args buffer in r0 */
OsGetArgsAddr:
    ldr     r0, =__quickstart_args_start
    bx      lr
#endif

reset_vector:
    /* clear register TPIDRPRW (per-CPU software thread-ID register) */
    mov     r0, #0
    mcr     p15, 0, r0, c13, c0, 4
    /* do some early cpu setup via SCTLR: i/d cache disable, mmu disabled */
    mrc     p15, 0, r0, c1, c0, 0
    bic     r0, #(1 << 12)          /* i cache */
    bic     r0, #(1 << 2)           /* d cache */
    bic     r0, #(1 << 0)           /* mmu */
    mcr     p15, 0, r0, c1, c0, 0

    /* enable fpu+neon */
#ifndef LOSCFG_TEE_ENABLE
    MRC    p15, 0, r0, c1, c1, 2    /* NSACR: non-secure access control */
    ORR    r0, r0, #0xC00           /* grant non-secure access to cp10/cp11 (FP/NEON) */
    BIC    r0, r0, #0xC000          /* clear bits 14-15 (NSD32DIS/NSASEDIS): keep D16-D31/SIMD usable */
    MCR    p15, 0, r0, c1, c1, 2

    LDR    r0, =(0xF << 20)         /* CPACR: full cp10/cp11 access from PL0/PL1 */
    MCR    p15, 0, r0, c1, c0, 2
    ISB
#endif
    MOV    r3, #0x40000000          /* FPEXC.EN: switch the FP/NEON unit on */
    VMSR   FPEXC, r3

    /* r11: delta of physical address and virtual address */
    adr     r11, pa_va_offset       /* adr yields the run-time (load/physical) address */
    ldr     r0, [r11]               /* the word holds its link-time (virtual) address */
    sub     r11, r11, r0            /* r11 = pa - va */

    mrc     p15, 0, r12, c0, c0, 5              /* r12: get cpuid (MPIDR Aff0) */
    and     r12, r12, #MPIDR_CPUID_MASK
    cmp     r12, #0
    bne     secondary_cpu_init                  /* only the boot CPU relocates / builds tables */

    /* if we need to relocate to proper location or not */
    adr     r4, __exception_handlers            /* r4: base of load address */
    ldr     r5, =SYS_MEM_BASE                   /* r5: base of physical address */
    subs    r12, r4, r5                         /* r12: delta of load address and physical address */
    beq     reloc_img_to_bottom_done            /* if we load image at the bottom of physical address */

    /* we need to relocate image at the bottom of physical address */
    ldr     r7, =__exception_handlers           /* r7: base of linked address (or vm address) */
    ldr     r6, =__bss_start                    /* r6: end of linked address (or vm address) */
    sub     r6, r7                              /* r6: delta of linked address (or vm address) */
    add     r6, r4                              /* r6: end of load address */

/* copy the image word by word from [r4, r6) down to SYS_MEM_BASE (r5) */
reloc_img_to_bottom_loop:
    ldr     r7, [r4], #4
    str     r7, [r5], #4
    cmp     r4, r6
    bne     reloc_img_to_bottom_loop
    sub     pc, r12                             /* jump into the relocated copy: pc reads as '.'+8,
                                                 * so execution resumes at the relocated 'sub r11' below */
    nop                                         /* never executed; covers the pc-read offset */
    sub     r11, r11, r12                       /* r11: eventual address offset (runs in the copy) */

reloc_img_to_bottom_done:
#ifdef LOSCFG_KERNEL_MMU
    ldr     r4, =g_firstPageTable               /* r4: physical address of translation table and clear it */
    add     r4, r4, r11                         /* va -> pa via the r11 delta */
    mov     r0, r4
    mov     r1, #0
    mov     r2, #MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS
    bl      memset_optimized                    /* optimized memset since r0 is 64-byte aligned */

    ldr     r5, =g_archMmuInitMapping
    add     r5, r5, r11

/* walk g_archMmuInitMapping until its zero-sized terminator entry */
init_mmu_loop:
    ldmia   r5!, {r6-r10}                       /* r6 = phys, r7 = virt, r8 = size, r9 = mmu_flags, r10 = name */
    cmp     r8, 0                               /* if size = 0, the mmu init done */
    beq     init_mmu_done
    bl      page_table_build
    b       init_mmu_loop
init_mmu_done:
    orr     r8, r4, #MMU_TTBRx_FLAGS            /* r8 = r4 and set cacheable attributes on translation walk */
    ldr     r4, =g_mmuJumpPageTable             /* r4: jump pagetable vaddr */
    add     r4, r4, r11
    ldr     r4, [r4]
    add     r4, r4, r11                         /* r4: jump pagetable paddr */

    /* build 1M section mapping, in order to jump va during turning on mmu: pa == pa, va == pa */
    mov     r6, pc
    mov     r7, r6                              /* r7: pa (MB aligned) */
                                                /* NOTE(review): r7 keeps pc's low 20 bits; the
                                                 * 'lsr #(20 - 2)' index below is exact only while
                                                 * pc's offset inside its MB is < 256 KB -- confirm */
    lsr     r6, r6, #20                         /* r6: va l1 index */
    ldr     r10, =MMU_DESCRIPTOR_KERNEL_L1_PTE_FLAGS
    add     r12, r10, r6, lsl #20               /* r12: pa |flags */
    str     r12, [r4, r7, lsr #(20 - 2)]        /* jumpTable[paIndex] = pt entry */
    rsb     r7, r11, r6, lsl #20                /* r7: va */
    str     r12, [r4, r7, lsr #(20 - 2)]        /* jumpTable[vaIndex] = pt entry */

    bl      mmu_setup                           /* set up the mmu */
#endif
    /* clear out the interrupt and exception stack and set magic num to check the overflow */
    ldr     r0, =__svc_stack                    /* .int_stack section: svc stacks, then exc stacks */
    ldr     r1, =__exc_stack_top
    bl      stack_init

    STACK_MAGIC_SET __svc_stack, #OS_EXC_SVC_STACK_SIZE, OS_STACK_MAGIC_WORD
    STACK_MAGIC_SET __exc_stack, #OS_EXC_STACK_SIZE, OS_STACK_MAGIC_WORD

warm_reset:
    /* initialize CPSR (machine state register): SVC mode, IRQ/FIQ masked */
    mov    r0, #(CPSR_IRQ_DISABLE|CPSR_FIQ_DISABLE|CPSR_SVC_MODE)
    msr    cpsr, r0

    /* Note: some functions in LIBGCC1 will cause a "restore from SPSR"!! */
    msr    spsr, r0

    /* get cpuid and keep it in r12 */
    mrc     p15, 0, r12, c0, c0, 5
    and     r12, r12, #MPIDR_CPUID_MASK

    /* set svc stack, every cpu has OS_EXC_SVC_STACK_SIZE stack */
    ldr    r0, =__svc_stack_top
    mov    r2, #OS_EXC_SVC_STACK_SIZE
    mul    r2, r2, r12
    sub    r0, r0, r2               /* sp = top - cpuid * per-CPU size */
    mov    sp, r0

    LDR    r0, =__exception_handlers
    MCR    p15, 0, r0, c12, c0, 0   /* VBAR: point the vector base at our handlers */

    cmp    r12, #0
    bne    cpu_start                /* secondary CPUs skip bss clearing and main */

clear_bss:
    /* memset(__bss_start, 0, __bss_end - __bss_start) */
    ldr    r0, =__bss_start
    ldr    r2, =__bss_end
    mov    r1, #0
    sub    r2, r2, r0
    bl     memset

#if defined(LOSCFG_CC_STACKPROTECTOR_ALL) || \
    defined(LOSCFG_CC_STACKPROTECTOR_STRONG) || \
    defined(LOSCFG_CC_STACKPROTECTOR)
    bl     __stack_chk_guard_setup
#endif

#ifdef LOSCFG_GDB_DEBUG
    /* GDB_START - generate a compiled break. This function will get GDB stubs started, with a proper environment */
    bl     GDB_START
    .word  0xe7ffdeff               /* permanently-undefined instruction used as the breakpoint */
#endif

    bl     main                     /* C entry point; not expected to return */

_start_hang:
    b      _start_hang              /* park here if main ever returns */

#ifdef LOSCFG_KERNEL_MMU
/*
 * Turn on the MMU and caches.
 * In:  r4  = paddr of the temporary (jump) page table
 *      r8  = TTBR value for the final page table (base | MMU_TTBRx_FLAGS)
 *      r11 = pa - va delta, used to fix up lr
 * Returns to the caller at its virtual address. Clobbers r12.
 */
mmu_setup:
    mov     r12, #0
    mcr     p15, 0, r12, c8, c7, 0              /* Set c8 to control the TLB and set the mapping to invalid */
    isb
    mcr     p15, 0, r12, c2, c0, 2              /* Initialize the c2 register (TTBCR = 0) */
    isb
    orr     r12, r4, #MMU_TTBRx_FLAGS
    mcr     p15, 0, r12, c2, c0, 0              /* Set attributes and set temp page table (TTBR0) */
    isb
    mov     r12, #0x7                           /* 0b0111 */
    mcr     p15, 0, r12, c3, c0, 0              /* Set DACR with 0b0111, client and manager domain */
    isb
    mrc    p15, 0, r12, c1, c0, 1               /* ACTLR, Auxiliary Control Register */
    orr    r12, r12, #(1 << 6)                  /* SMP, Enables coherent requests to the processor. */
    orr    r12, r12, #(1 << 2)                  /* Enable D-side prefetch */
    orr    r12, r12, #(1 << 11)                 /* Global BP Enable bit */
    mcr    p15, 0, r12, c1, c0, 1               /* ACTLR, Auxiliary Control Register */
    dsb
    mrc     p15, 0, r12, c1, c0, 0
    bic     r12, #(1 << 29 | 1 << 28)           /* Disable TRE/AFE */
    orr     r12, #(1 << 0)                      /* mmu enable */
    bic     r12, #(1 << 1)                      /* alignment checking off */
    orr     r12, #(1 << 2)                      /* D cache enable */
    orr     r12, #(1 << 12)                     /* I cache enable */
    mcr     p15, 0, r12, c1, c0, 0              /* Set SCTLR with r12: Turn on the MMU, I/D cache Disable TRE/AFE */
    isb
    ldr     pc,  =1f                            /* Convert to VA: absolute load of the linked address */
1:
    mcr     p15, 0, r8, c2, c0, 0               /* Go to the base address saved in C2: Jump to the page table */
    isb
    mov     r12, #0
    mcr     p15, 0, r12, c8, c7, 0              /* invalidate TLBs again after switching tables */
    isb
    sub     lr,  r11                            /* adjust lr with delta of physical address and virtual address */
    bx      lr
#endif

    .code  32

    .global reset_platform
    .type   reset_platform,function
/* Software reset entry, callable from C */
reset_platform:
#ifdef A7SEM_HAL_ROM_MONITOR
    /* initialize CPSR (machine state register) and re-enter the warm-reset path */
    mov    r0, #(CPSR_IRQ_DISABLE|CPSR_FIQ_DISABLE|CPSR_SVC_MODE)
    msr    cpsr, r0
    b      warm_reset
#else
    /* no ROM monitor: jump to address 0 */
    mov    r0, #0
    mov    pc, r0   // Jump to reset vector
#endif

/*
 * Secondary-CPU continuation after warm_reset: reuse the page tables the
 * boot CPU already built, enable the MMU, then enter C.
 */
cpu_start:
#ifdef LOSCFG_KERNEL_MMU
    ldr     r4, =g_firstPageTable               /* r4 = pa of the L1 translation table (built by CPU0; not cleared here) */
    add     r4, r4, r11
    orr     r8, r4, #MMU_TTBRx_FLAGS            /* r8 = final TTBR value consumed by mmu_setup */

    ldr     r4, =g_mmuJumpPageTable             /* r4 = tt_trampoline vaddr */
    add     r4, r4, r11
    ldr     r4, [r4]
    add     r4, r4, r11                         /* r4 = tt_trampoline paddr */

    bl     mmu_setup
#endif

    bl     HalSecondaryCpuStart                 /* C bring-up for this CPU; not expected to return */
    b      .                                    /* park if it ever does */

/* First stop for non-boot CPUs branched from reset_vector */
secondary_cpu_init:
#ifdef LOSCFG_TEE_ENABLE
    /* enable fpu+neon */
    ldr    r0, =0x60c00                         /* NSACR: cp10/cp11 enable (0xC00) plus bits 17-18
                                                 * -- implementation defined, confirm vs SoC manual */
    MCR    p15, 0, r0, c1, c1, 2

    LDR    r0, =(0xF << 20)                     /* CPACR: full cp10/cp11 access */
    MCR    p15, 0, r0, c1, c0, 2
    cps    #0x16                                /* enter Monitor mode to program SCR */
    mov    r1, #0x25                            /* SCR: NS=1 plus further bits -- confirm 0x25 vs TEE design */
    mcr    p15, 0, r1, c1, c1, 0
    cps    #0x13                                /* back to SVC mode */
#endif
    bl      warm_reset                          /* does not return (lr unused afterwards) */

/*
 * set sp for current cpu
 * r1 is stack bottom, r0 is stack size, r12 hold cpu id
 * (r1 is the highest address of the region — CPU0's initial sp; each CPU's
 *  stack sits r12 * r0 bytes below it.) Clobbers r2, r3, r12.
 */
sp_set:
    mrc    p15, 0, r12, c0, c0, 5   /* refresh cpu id from MPIDR */
    and    r12, r12, #MPIDR_CPUID_MASK
    mul    r3, r0, r12              /* r3 = cpuid * stack size */
    sub    r2, r1, r3
    mov    sp, r2
    bx     lr          /* set sp */

/*
 * Fill L1 section entries (1 MB each) for one g_archMmuInitMapping record.
 * r4: page table base address
 * r6: physical address
 * r7: virtual address
 * r8: sizes (bytes)
 * r9: mmu flags for the section entries (see init_mmu_loop: r9 = mmu_flags)
 * r10 and r12 will be used as variable
 */
#ifdef LOSCFG_KERNEL_MMU
page_table_build:
    mov     r10, r6
    bfc     r10, #20, #12                       /* r10: pa % MB */
    add     r8, r8, r10                         /* widen size by the sub-MB misalignment ... */
    add     r8, r8, #(1 << 20)
    sub     r8, r8, #1                          /* ... so the shift below rounds up to whole MBs */
    lsr     r6, #20                             /* r6 = physical address / MB */
    lsr     r7, #20                             /* r7 = virtual address / MB */
    lsr     r8, #20                             /* r8 = roundup(size, MB) */

page_table_build_loop:
    orr     r12, r9, r6, lsl #20                /* r12: flags | physAddr */
    str     r12, [r4, r7, lsl #2]               /* gPgTable[l1Index] = physAddr | flags */
    add     r6, #1                              /* physAddr += 1 MB */
    add     r7, #1                              /* l1Index++ */
    subs    r8, #1                              /* sizes-- */
    bne     page_table_build_loop
    bx      lr
#endif

/*
 * init stack to initial value
 * r0 is stack mem start, r1 is stack mem end
 * Clobbers r0, r2, r3. Assumes (r1 - r0) is a multiple of 32 — TODO confirm
 * against the configured stack sizes.
 * NOTE(review): 'blt' is a signed address compare; fine while both ends sit
 * in the same signed half of the address space — confirm for this layout.
 */
stack_init:
    ldr     r2, =OS_STACK_INIT
    ldr     r3, =OS_STACK_INIT
    /* Main loop sets 32 bytes at a time. */
stack_init_loop:
    .irp    offset, #0, #8, #16, #24            /* four 8-byte strd = 32 bytes per pass */
    strd    r2, r3, [r0, \offset]
    .endr
    add     r0, #32
    cmp     r0, r1
    blt     stack_init_loop
    bx      lr

/*
 * '.' assembles to this word's link-time (virtual) address; reading it via
 * 'adr' at boot yields the load (physical) address, and the difference is
 * the pa - va delta kept in r11 (see reset_vector).
 */
pa_va_offset:
    .word   .

/*
 * set magic num to stack top for all cpu
 * r0 is stack top (lowest address of the region), r1 is the per-CPU stack
 * size, r2 is magic num; r3 counts CORE_NUM iterations. Clobbers r0, r3.
 */
excstack_magic:
    mov     r3, #0
excstack_magic_loop:
    str     r2, [r0]                /* plant the overflow-check word for this CPU's stack */
    add     r0, r0, r1              /* step to the next CPU's stack */
    add     r3, r3, #1
    cmp     r3, #CORE_NUM
    blt     excstack_magic_loop
    bx      lr

#ifdef LOSCFG_KERNEL_MMU
/*
 * NEON-accelerated memset for the early boot path.
 * In:  r0 = dst (64-byte aligned), r1 = fill byte, r2 = size in bytes
 * Out: r0 = dst unchanged (memset convention)
 * Clobbers: r2, r3, q0-q3, flags
 *
 * Stores 64 bytes per iteration; sizes that are not a multiple of 64 are
 * rounded UP to the next chunk. The only caller passes the L1 page-table
 * size, a multiple of 64.
 */
memset_optimized:
    mov     r3, r0
    vdup.8  q0, r1                              /* replicate the fill byte into all 16 lanes */
    vmov    q1, q0
    vmov    q2, q0
    vmov    q3, q0
memset_optimized_loop:
    subs    r2, #64
    vstmia  r3!, {d0 - d7}                      /* store one 64-byte chunk */
    bgt     memset_optimized_loop               /* FIX: was 'bge' — for a size that is an exact
                                                 * multiple of 64 the ge condition looped once
                                                 * more after r2 reached 0, storing 64 bytes
                                                 * past the end of the buffer */
    bx      lr
#endif
/* Sentinel word after the startup code (not referenced in this file) */
init_done:
    .long  0xDEADB00B

    .code  32
    .data

/* Boot flag in .data; not referenced in this file — presumably read/written by C code, confirm */
init_flag:
    .balign 4
    .long   0

    /*
    * Temporary interrupt stack
    */
    .section ".int_stack", "wa", %nobits
    .align  3

/* CORE_NUM contiguous per-CPU SVC-mode stacks; __svc_stack is the lowest address */
__svc_stack:
    .space OS_EXC_SVC_STACK_SIZE * CORE_NUM
__svc_stack_top:

/* CORE_NUM contiguous per-CPU exception-mode stacks, directly above the SVC stacks */
__exc_stack:
    .space OS_EXC_STACK_SIZE * CORE_NUM
__exc_stack_top:
