/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @defgroup los_hw Hardware
 * @ingroup kernel
 */

#ifndef _LOS_HW_CPU_H
#define _LOS_HW_CPU_H

#include "los_typedef.h"
#include "los_toolchain.h"
#include "los_hw_arch.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/* ARM barrier and hint instruction wrappers */
#define DSB     __asm__ volatile("dsb" ::: "memory")
#define DMB     __asm__ volatile("dmb" ::: "memory")
#define ISB     __asm__ volatile("isb" ::: "memory")
#define WFI     __asm__ volatile("wfi" ::: "memory")
#define BARRIER __asm__ volatile("":::"memory")
#define WFE     __asm__ volatile("wfe" ::: "memory")
#define SEV     __asm__ volatile("sev" ::: "memory")

#define ARM_SYSREG_READ(REG)                    \
({                                              \
    UINT32 _val;                                \
    __asm__ volatile("mrc " REG : "=r" (_val)); \
    _val;                                       \
})

#define ARM_SYSREG_WRITE(REG, val)              \
({                                              \
    __asm__ volatile("mcr " REG :: "r" (val));  \
    ISB;                                        \
})

#define ARM_SYSREG64_READ(REG)                   \
({                                               \
    UINT64 _val;                                 \
    __asm__ volatile("mrrc " REG : "=r" (_val)); \
    _val;                                        \
})

#define ARM_SYSREG64_WRITE(REG, val)             \
({                                               \
    __asm__ volatile("mcrr " REG :: "r" (val));  \
    ISB;                                         \
})

#define CP14_REG(CRn, Op1, CRm, Op2)    "p14, "#Op1", %0, "#CRn","#CRm","#Op2
#define CP15_REG(CRn, Op1, CRm, Op2)    "p15, "#Op1", %0, "#CRn","#CRm","#Op2
#define CP15_REG64(CRn, Op1)            "p15, "#Op1", %0,    %H0,"#CRn
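
/*
 * Illustrative sketch (not part of the original header): CP15_REG() expands to
 * the operand string of an MRC/MCR instruction, so together with
 * ARM_SYSREG_READ()/ARM_SYSREG_WRITE() a CP15 register access looks like:
 *
 *     UINT32 sctlr = ARM_SYSREG_READ(CP15_REG(c1, 0, c0, 0));   // read SCTLR
 *     ARM_SYSREG_WRITE(CP15_REG(c1, 0, c0, 0), sctlr);          // write SCTLR, followed by ISB
 *
 * The named register macros below wrap these encodings so callers can write
 * ARM_SYSREG_READ(SCTLR) instead of spelling out the coprocessor operands.
 */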

/*
 * Identification registers (c0)
 */
#define MIDR                CP15_REG(c0, 0, c0, 0)    /* Main ID Register */
#define MPIDR               CP15_REG(c0, 0, c0, 5)    /* Multiprocessor Affinity Register */
#define CCSIDR              CP15_REG(c0, 1, c0, 0)    /* Cache Size ID Registers */
#define CLIDR               CP15_REG(c0, 1, c0, 1)    /* Cache Level ID Register */
#define VPIDR               CP15_REG(c0, 4, c0, 0)    /* Virtualization Processor ID Register */
#define VMPIDR              CP15_REG(c0, 4, c0, 5)    /* Virtualization Multiprocessor ID Register */

/*
 * System control registers (c1)
 */
#define SCTLR               CP15_REG(c1, 0, c0, 0)    /* System Control Register */
#define ACTLR               CP15_REG(c1, 0, c0, 1)    /* Auxiliary Control Register */
#define CPACR               CP15_REG(c1, 0, c0, 2)    /* Coprocessor Access Control Register */

/*
 * Memory protection and control registers (c2 & c3)
 */
#define TTBR0               CP15_REG(c2, 0, c0, 0)    /* Translation Table Base Register 0 */
#define TTBR1               CP15_REG(c2, 0, c0, 1)    /* Translation Table Base Register 1 */
#define TTBCR               CP15_REG(c2, 0, c0, 2)    /* Translation Table Base Control Register */
#define DACR                CP15_REG(c3, 0, c0, 0)    /* Domain Access Control Register */

/*
 * Memory system fault registers (c5 & c6)
 */
#define DFSR                CP15_REG(c5, 0, c0, 0)    /* Data Fault Status Register */
#define IFSR                CP15_REG(c5, 0, c0, 1)    /* Instruction Fault Status Register */
#define DFAR                CP15_REG(c6, 0, c0, 0)    /* Data Fault Address Register */
#define IFAR                CP15_REG(c6, 0, c0, 2)    /* Instruction Fault Address Register */

/*
 * Process, context and thread ID registers (c13)
 */
#define FCSEIDR             CP15_REG(c13, 0, c0, 0)    /* FCSE Process ID Register */
#define CONTEXTIDR          CP15_REG(c13, 0, c0, 1)    /* Context ID Register */
#define TPIDRURW            CP15_REG(c13, 0, c0, 2)    /* User Read/Write Thread ID Register */
#define TPIDRURO            CP15_REG(c13, 0, c0, 3)    /* User Read-Only Thread ID Register */
#define TPIDRPRW            CP15_REG(c13, 0, c0, 4)    /* PL1 only Thread ID Register */

#define MPIDR_CPUID_MASK    (0xffU)

/* The current task control block pointer is cached in the PL1 thread ID register (TPIDRPRW) */
STATIC INLINE VOID *ArchCurrTaskGet(VOID)
{
    return (VOID *)(UINTPTR)ARM_SYSREG_READ(TPIDRPRW);
}

STATIC INLINE VOID ArchCurrTaskSet(VOID *val)
{
    ARM_SYSREG_WRITE(TPIDRPRW, (UINT32)(UINTPTR)val);
}

/* The current user-space task is published through the user read-only thread ID register (TPIDRURO) */
STATIC INLINE VOID ArchCurrUserTaskSet(UINTPTR val)
{
    ARM_SYSREG_WRITE(TPIDRURO, (UINT32)val);
}

/* Return the logical CPU index, taken from the low affinity bits of MPIDR on SMP builds */
STATIC INLINE UINT32 ArchCurrCpuid(VOID)
{
#ifdef LOSCFG_KERNEL_SMP
    return ARM_SYSREG_READ(MPIDR) & MPIDR_CPUID_MASK;
#else
    return 0;
#endif
}
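
/*
 * Illustrative sketch (assumption, not part of the original header): in SMP
 * builds ArchCurrCpuid() is typically used to index per-CPU data, e.g. with a
 * hypothetical per-CPU array:
 *
 *     g_percpu[ArchCurrCpuid()].field = value;
 *
 * On uniprocessor builds it always returns 0, so such indexing remains valid.
 */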

STATIC INLINE UINT64 OsHwIDGet(VOID)
{
    return ARM_SYSREG_READ(MPIDR);
}

STATIC INLINE UINT32 OsMainIDGet(VOID)
{
    return ARM_SYSREG_READ(MIDR);
}

/* CPU interrupt mask handling implementation */
#if LOSCFG_ARM_ARCH >= 6

/* Mask IRQ and FIQ and return the previous CPSR value */
STATIC INLINE UINT32 ArchIntLock(VOID)
{
    UINT32 intSave;
    __asm__ __volatile__(
        "mrs    %0, cpsr      \n"
        "cpsid  if              "
        : "=r"(intSave)
        :
        : "memory");
    return intSave;
}

/* Unmask IRQ and FIQ and return the CPSR value prior to unmasking */
STATIC INLINE UINT32 ArchIntUnlock(VOID)
{
    UINT32 intSave;
    __asm__ __volatile__(
        "mrs    %0, cpsr      \n"
        "cpsie  if              "
        : "=r"(intSave)
        :
        : "memory");
    return intSave;
}

/* Mask IRQ only */
STATIC INLINE VOID ArchIrqDisable(VOID)
{
    __asm__ __volatile__(
        "cpsid  i      "
        :
        :
        : "memory", "cc");
}

/* Unmask IRQ only */
STATIC INLINE VOID ArchIrqEnable(VOID)
{
    __asm__ __volatile__(
        "cpsie  i      "
        :
        :
        : "memory", "cc");
}

#else

/* Pre-ARMv6: set the I and F bits in CPSR and return the previous CPSR value */
STATIC INLINE UINT32 ArchIntLock(VOID)
{
    UINT32 intSave, temp;
    __asm__ __volatile__(
        "mrs    %0, cpsr      \n"
        "orr    %1, %0, #0xc0 \n"
        "msr    cpsr_c, %1      "
        :"=r"(intSave),  "=r"(temp)
        : :"memory");
    return intSave;
}

/* Pre-ARMv6: clear the I and F bits in CPSR; returns the new (unmasked) CPSR value */
STATIC INLINE UINT32 ArchIntUnlock(VOID)
{
    UINT32 intSave;
    __asm__ __volatile__(
        "mrs    %0, cpsr      \n"
        "bic    %0, %0, #0xc0 \n"
        "msr    cpsr_c, %0      "
        : "=r"(intSave)
        : : "memory");
    return intSave;
}

#endif

/* Restore the interrupt mask state saved by ArchIntLock() */
STATIC INLINE VOID ArchIntRestore(UINT32 intSave)
{
    __asm__ __volatile__(
        "msr    cpsr_c, %0      "
        :
        : "r"(intSave)
        : "memory");
}
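
/*
 * Illustrative critical-section sketch (not part of the original header):
 * ArchIntLock() returns the caller's previous CPSR and ArchIntRestore()
 * writes it back, so nested critical sections restore the mask correctly:
 *
 *     UINT32 intSave = ArchIntLock();   // interrupts masked from here
 *     ... access data shared with interrupt handlers ...
 *     ArchIntRestore(intSave);          // previous mask state restored
 */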

#define PSR_I_BIT   0x00000080U

/* Return non-zero if IRQs are currently masked in CPSR */
STATIC INLINE UINT32 OsIntLocked(VOID)
{
    UINT32 intSave;

    asm volatile(
        "mrs    %0, cpsr        "
        : "=r" (intSave)
        :
        : "memory", "cc");

    return intSave & PSR_I_BIT;
}

/* Return the current stack pointer */
STATIC INLINE UINT32 ArchSPGet(VOID)
{
    UINT32 val;
    __asm__ __volatile__("mov %0, sp" : "=r"(val));
    return val;
}

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _LOS_HW_CPU_H */