/* ARMv7-A startup / reset-vector code (page-scrape navigation artifacts removed) */
/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31
#define  ASSEMBLY
#include "arch_config.h"
#include "los_vm_boot.h"
#include "los_vm_zone.h"
#include "los_mmu_descriptor_v6.h"
#undef ASSEMBLY


    .global __exc_stack_top
    .global __svc_stack_top
    .global __exc_stack
    .global __svc_stack

    .extern __bss_start
    .extern __bss_end
    .extern hal_clock_initialize_start
    .extern _osExceptFiqHdl
    .extern _osExceptAddrAbortHdl
    .extern _osExceptDataAbortHdl
    .extern _osExceptPrefetchAbortHdl
    .extern _osExceptSwiHdl
    .extern _osExceptUndefInstrHdl
    .extern __stack_chk_guard_setup
    .extern g_firstPageTable
    .extern g_mmuJumpPageTable
    .extern g_archMmuInitMapping

    /* low 8 bits of MPIDR (Aff0) give the per-cluster CPU id used below */
    .equ MPIDR_CPUID_MASK, 0xffU

    .fpu neon-vfpv4
    .syntax unified
    .arch armv7-a
    .arm
65
/*
 * Set the per-cpu stack pointer via sp_set.
 * param0 is stack bottom, param1 is stack size, r11 holds cpu id.
 * Clobbers: r0, r1, lr (and r2, r3 inside sp_set).
 */
.macro EXC_SP_SET param0, param1
    ldr    r1, =\param0
    mov    r0, \param1
    bl     sp_set
.endm

/*
 * Write the overflow-check magic word at the top of each cpu's stack
 * via excstack_magic.
 * param0 is stack top, param1 is stack size, param2 is magic num.
 * Clobbers: r0, r1, r2, lr (and r3 inside excstack_magic).
 */
.macro STACK_MAGIC_SET param0, param1, param2
    ldr     r0, =\param0
    mov     r1, \param1
    ldr     r2, =\param2
    bl      excstack_magic
.endm
80
81    .code   32
82    .section ".vectors","ax"
83
84__exception_handlers:
85    /*
86    *Assumption:  ROM code has these vectors at the hardware reset address.
87    *A simple jump removes any address-space dependencies [i.e. safer]
88    */
89    b   reset_vector
90    b   _osExceptUndefInstrHdl
91    b   _osExceptSwiHdl
92    b   _osExceptPrefetchAbortHdl
93    b   _osExceptDataAbortHdl
94    b   _osExceptAddrAbortHdl
95    b   OsIrqHandler
96    b   _osExceptFiqHdl
97
98    /* Startup code which will get the machine into supervisor mode */
99    .global reset_vector
100    .type   reset_vector,function
101reset_vector:
102    /* do some early cpu setup: i/d cache disable, mmu disabled */
103    mrc     p15, 0, r0, c1, c0, 0
104    bic     r0, #(1 << 12)          /* i cache */
105    bic     r0, #(1 << 2)           /* d cache */
106    bic     r0, #(1 << 0)           /* mmu */
107    mcr     p15, 0, r0, c1, c0, 0
108
109    /* enable fpu+neon */
110#ifndef LOSCFG_TEE_ENABLE
111    MRC    p15, 0, r0, c1, c1, 2
112    ORR    r0, r0, #0xC00
113    BIC    r0, r0, #0xC000
114    MCR    p15, 0, r0, c1, c1, 2
115
116    LDR    r0, =(0xF << 20)
117    MCR    p15, 0, r0, c1, c0, 2
118    ISB
119#endif
120    MOV    r3, #0x40000000
121    VMSR   FPEXC, r3
122
123    /* r11: delta of physical address and virtual address */
124    adr     r11, pa_va_offset
125    ldr     r0, [r11]
126    sub     r11, r11, r0
127
128    /* if we need to relocate to proper location or not */
129    adr     r4, __exception_handlers            /* r4: base of load address */
130    ldr     r5, =SYS_MEM_BASE                   /* r5: base of physical address */
131    subs    r12, r4, r5                         /* r12: delta of load address and physical address */
132    beq     reloc_img_to_bottom_done            /* if we load image at the bottom of physical address */
133
134    /* we need to relocate image at the bottom of physical address */
135    ldr     r7, =__exception_handlers           /* r7: base of linked address (or vm address) */
136    ldr     r6, =__bss_start                    /* r6: end of linked address (or vm address) */
137    sub     r6, r7                              /* r6: delta of linked address (or vm address) */
138    add     r6, r4                              /* r6: end of load address */
139
140reloc_img_to_bottom_loop:
141    ldr     r7, [r4], #4
142    str     r7, [r5], #4
143    cmp     r4, r6
144    bne     reloc_img_to_bottom_loop
145    sub     pc, r12
146    nop
147    sub     r11, r11, r12                       /* r11: eventual address offset */
148
reloc_img_to_bottom_done:
#ifdef LOSCFG_KERNEL_MMU
    ldr     r4, =g_firstPageTable               /* r4: physical address of translation table and clear it */
    add     r4, r4, r11
    mov     r0, r4
    mov     r1, #0
    mov     r2, #MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS
    bl      memset_optimized                    /* optimized memset since r0 is 64-byte aligned */

    /* walk g_archMmuInitMapping entries until a zero-sized terminator */
    ldr     r5, =g_archMmuInitMapping
    add     r5, r5, r11
init_mmu_loop:
    ldmia   r5!, {r6-r10}                       /* r6 = phys, r7 = virt, r8 = size, r9 = mmu_flags, r10 = name */
    cmp     r8, #0                              /* if size = 0, the mmu init done */
    beq     init_mmu_done
    bl      page_table_build
    b       init_mmu_loop
init_mmu_done:
    orr     r8, r4, #MMU_TTBRx_FLAGS            /* r8 = r4 and set cacheable attributes on translation walk */
    ldr     r4, =g_mmuJumpPageTable             /* r4: jump pagetable vaddr */
    add     r4, r4, r11
    ldr     r4, [r4]
    add     r4, r4, r11                         /* r4: jump pagetable paddr */

    /* build 1M section mapping, in order to jump va during turning on mmu: pa == pa, va == pa */
    mov     r6, pc
    mov     r7, r6                              /* r7: pa (MB aligned) */
    lsr     r6, r6, #20                         /* r6: va l1 index */
    ldr     r10, =MMU_DESCRIPTOR_KERNEL_L1_PTE_FLAGS
    add     r12, r10, r6, lsl #20               /* r12: pa | flags */
    str     r12, [r4, r7, lsr #(20 - 2)]        /* jumpTable[paIndex] = pt entry */
    rsb     r7, r11, r6, lsl #20                /* r7: va */
    str     r12, [r4, r7, lsr #(20 - 2)]        /* jumpTable[vaIndex] = pt entry */

    bl      mmu_setup                           /* set up the mmu */
#endif
185    /* get cpuid and keep it in r11 */
186    mrc     p15, 0, r11, c0, c0, 5
187    and     r11, r11, #MPIDR_CPUID_MASK
188    cmp     r11, #0
189    bne     excstatck_loop_done
190
191excstatck_loop:
192    /* clear out the interrupt and exception stack and set magic num to check the overflow */
193    ldr     r0, =__svc_stack
194    ldr     r1, =__exc_stack_top
195    bl      stack_init
196
197    STACK_MAGIC_SET __svc_stack, #OS_EXC_SVC_STACK_SIZE, OS_STACK_MAGIC_WORD
198    STACK_MAGIC_SET __exc_stack, #OS_EXC_STACK_SIZE, OS_STACK_MAGIC_WORD
199
excstatck_loop_done:
warm_reset:
    /* initialize CPSR (machine state register): SVC mode, IRQ/FIQ masked */
    mov    r0, #(CPSR_IRQ_DISABLE|CPSR_FIQ_DISABLE|CPSR_SVC_MODE)
    msr    cpsr, r0

    /* Note: some functions in LIBGCC1 will cause a "restore from SPSR"!! */
    msr    spsr, r0

    /* set svc stack, every cpu has OS_EXC_SVC_STACK_SIZE stack */
    ldr    r0, =__svc_stack_top
    mov    r2, #OS_EXC_SVC_STACK_SIZE
    mul    r2, r2, r11
    sub    r0, r0, r2
    mov    sp, r0

    /* enable fpu+neon (re-done here so warm_reset also covers secondary cpus) */
    MRC    p15, 0, r0, c1, c1, 2
    ORR    r0, r0, #0xC00
    BIC    r0, r0, #0xC000
    MCR    p15, 0, r0, c1, c1, 2

    LDR    r0, =(0xF << 20)
    MCR    p15, 0, r0, c1, c0, 2

    MOV    r3, #0x40000000
    VMSR   FPEXC, r3

    /* point VBAR (c12,c0,0) at our exception vector table */
    LDR    r0, =__exception_handlers
    MCR    p15, 0, r0, c12, c0, 0
clear_bss:
    /* memset(__bss_start, 0, __bss_end - __bss_start) */
    ldr    r0, =__bss_start
    ldr    r2, =__bss_end
    mov    r1, #0
    sub    r2, r2, r0
    bl     memset

#if defined(LOSCFG_CC_STACKPROTECTOR_ALL) || \
    defined(LOSCFG_CC_STACKPROTECTOR_STRONG) || \
    defined(LOSCFG_CC_STACKPROTECTOR)
    bl     __stack_chk_guard_setup
#endif

#ifdef LOSCFG_GDB_DEBUG
    /* GDB_START - generate a compiled break. This function will get GDB stubs started, with a proper environment */
    bl     GDB_START
    .word  0xe7ffdeff
#endif

    bl     main

_start_hang:
    b      _start_hang                          /* main should never return; spin if it does */
254
#ifdef LOSCFG_KERNEL_MMU
/*
 * Turn on the MMU using the temporary jump page table in r4, then switch
 * to the real table whose TTBR value is in r8, and return via lr adjusted
 * to the virtual address space (r11 = pa - va delta).
 */
mmu_setup:
    mov     r12, #0
    mcr     p15, 0, r12, c8, c7, 0              /* Set c8 to control the TLB and set the mapping to invalid */
    isb

    mcr     p15, 0, r12, c2, c0, 2              /* Initialize the c2 register */
    isb

    orr     r12, r4, #MMU_TTBRx_FLAGS
    mcr     p15, 0, r12, c2, c0, 0              /* Set attributes and set temp page table */
    isb

    mov     r12, #0x7                           /* 0b0111 */
    mcr     p15, 0, r12, c3, c0, 0              /* Set DACR with 0b0111, client and manager domain */
    isb

    mrc     p15, 0, r12, c1, c0, 0
    bic     r12, #(1 << 29 | 1 << 28)           /* Disable TRE/AFE */
    orr     r12, #(1 << 0)                      /* mmu enable */
    bic     r12, #(1 << 1)
    orr     r12, #(1 << 2)                      /* D cache enable */
    orr     r12, #(1 << 12)                     /* I cache enable */
    mcr     p15, 0, r12, c1, c0, 0              /* Set SCTLR with r12: Turn on the MMU, I/D cache, Disable TRE/AFE */
    isb

    ldr     pc,  =1f                            /* Convert to VA */
1:
    mcr     p15, 0, r8, c2, c0, 0               /* Go to the base address saved in C2: Jump to the page table */
    isb

    mov     r12, #0
    mcr     p15, 0, r12, c8, c7, 0              /* Invalidate TLBs again after the table switch */
    isb

    sub     lr,  r11                            /* adjust lr with delta of physical address and virtual address */
    bx      lr
#endif
293    .code  32
294
295    .global reset_platform
296    .type   reset_platform,function
297reset_platform:
298#ifdef A7SEM_HAL_ROM_MONITOR
299    /* initialize CPSR (machine state register) */
300    mov    r0, #(CPSR_IRQ_DISABLE|CPSR_FIQ_DISABLE|CPSR_SVC_MODE)
301    msr    cpsr, r0
302    b      warm_reset
303#else
304    mov    r0, #0
305    mov    pc, r0   // Jump to reset vector
306#endif
307
/*
 * set sp for current cpu: sp = stack_bottom - cpu_id * stack_size
 * r1 is stack bottom, r0 is stack size, r11 holds cpu id
 * Clobbers: r2, r3
 */
sp_set:
    mul    r3, r0, r11
    sub    r2, r1, r3
    mov    sp, r2
    bx     lr          /* set sp */
317
/*
 * Fill 1 MB L1 section entries covering [r6, r6 + r8) at virtual base r7.
 * r4: page table base address
 * r6: physical address
 * r7: virtual address
 * r8: sizes
 * r9: flags
 * r10 and r12 will be used as scratch
 */
#ifdef LOSCFG_KERNEL_MMU
page_table_build:
    mov     r10, r6
    bfc     r10, #20, #12                       /* r10: pa % MB */
    add     r8, r8, r10                         /* extend size by the sub-MB offset ... */
    add     r8, r8, #(1 << 20)                  /* ... then round the MB count up */
    sub     r8, r8, #1
    lsr     r6, #20                             /* r6 = physical address / MB */
    lsr     r7, #20                             /* r7 = virtual address / MB */
    lsr     r8, #20                             /* r8 = roundup(size, MB) */

page_table_build_loop:
    orr     r12, r9, r6, lsl #20                /* r12: flags | physAddr */
    str     r12, [r4, r7, lsl #2]               /* gPgTable[l1Index] = physAddr | flags */
    add     r6, #1                              /* physAddr++ */
    add     r7, #1                              /* l1Index++ */
    subs    r8, #1                              /* sizes-- */
    bne     page_table_build_loop
    bx      lr
#endif
/*
 * init stack to initial value
 * r0 is stack mem start, r1 is stack mem end
 * Assumes r0 is 8-byte aligned and (r1 - r0) is a multiple of 32 —
 * the loop stores 32 bytes per iteration. Clobbers: r2, r3.
 */
stack_init:
    ldr     r2, =OS_STACK_INIT
    ldr     r3, =OS_STACK_INIT
    /* Main loop sets 32 bytes at a time via four strd stores. */
stack_init_loop:
    .irp    offset, #0, #8, #16, #24
    strd    r2, r3, [r0, \offset]
    .endr
    add     r0, #32
    cmp     r0, r1
    blt     stack_init_loop
    bx      lr

pa_va_offset:
    .word   .                                   /* link-time address of this word; see reset_vector */
365
/*
 * set magic num to stack top for all cpus (CORE_NUM stacks, r1 apart)
 * r0 is stack top, r1 is stack size, r2 is magic num
 * Clobbers: r0, r3
 */
excstack_magic:
    mov     r3, #0                              /* r3: cpu index */
excstack_magic_loop:
    str     r2, [r0]
    add     r0, r0, r1                          /* advance to the next cpu's stack top */
    add     r3, r3, #1
    cmp     r3, #CORE_NUM
    blt     excstack_magic_loop
    bx      lr
379
#ifdef LOSCFG_KERNEL_MMU
/*
 * NEON memset: fill r2 bytes at r0 with byte value r1, 64 bytes per store.
 * Assumes r2 is a positive multiple of 64 and r0 is suitably aligned —
 * TODO(review): confirm callers guarantee this (the page-table caller does).
 * Clobbers: r2, r3, q0-q3. r0 is preserved (memset-style return value).
 */
memset_optimized:
    mov     r3, r0
    vdup.8  q0, r1                              /* replicate the fill byte across q0-q3 */
    vmov    q1, q0
    vmov    q2, q0
    vmov    q3, q0
memset_optimized_loop:
    subs    r2, #64
    vstmia  r3!, {d0 - d7}
    bge     memset_optimized_loop
    bx      lr
#endif
init_done:
    .long  0xDEADB00B                           /* end-of-init marker word */

    .code  32
    .data

init_flag:
    .balign 4
    .long   0

    /*
     * Temporary interrupt stacks, one per core; tops are referenced by
     * warm_reset/sp_set, magic words by excstack_magic.
     */
    .section ".int_stack", "wa", %nobits
    .align  3

__svc_stack:
    .space OS_EXC_SVC_STACK_SIZE * CORE_NUM
__svc_stack_top:

__exc_stack:
    .space OS_EXC_STACK_SIZE * CORE_NUM
__exc_stack_top:
416