1 /**************************************************************************//**
2  * @file     core_ca.h
3  * @brief    CMSIS Cortex-A Core Peripheral Access Layer Header File
4  * @version  V1.0.2
5  * @date     12. November 2018
6  ******************************************************************************/
7 /*
8  * Copyright (c) 2009-2018 ARM Limited. All rights reserved.
9  *
10  * SPDX-License-Identifier: Apache-2.0
11  *
12  * Licensed under the Apache License, Version 2.0 (the License); you may
13  * not use this file except in compliance with the License.
14  * You may obtain a copy of the License at
15  *
16  * www.apache.org/licenses/LICENSE-2.0
17  *
18  * Unless required by applicable law or agreed to in writing, software
19  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21  * See the License for the specific language governing permissions and
22  * limitations under the License.
23  */
24 
25 #if   defined ( __ICCARM__ )
26   #pragma system_include         /* treat file as system include file for MISRA check */
27 #elif defined (__clang__)
28   #pragma clang system_header   /* treat file as system include file */
29 #endif
30 
31 #ifndef __CORE_CA_H_GENERIC
32 #define __CORE_CA_H_GENERIC
33 
34 #ifdef __cplusplus
35  extern "C" {
36 #endif
37 
38 #ifndef __ASSEMBLER__
39 #ifdef KERNEL_RHINO
40 #include "ca/irq_ctrl.h"
41 #include "driver/interrupt.h"
42 extern void gic_clear_pending(u32 id);
43 #define NVIC_SetPriority(irq, prio)         gic_set_irq_priority(irq, (prio))
44 #define NVIC_ClearPendingIRQ(irq)           gic_clear_pending(irq)
45 #define NVIC_EnableIRQ(irq)                 irq_enable(irq)
46 #define NVIC_DisableIRQ(irq)                irq_disable(irq)
47 //#define NVIC_GetActive(irq)                 (GIC_GetIRQStatus(irq) & (1 << 1))
48 #define NVIC_GetActive(irq)                 (((GICDistributor->ISACTIVER[(irq) / 32U])  >> ((irq) % 32U)) & 1UL)
49 
50 #define NVIC_SetVector(irq, vector)         irq_request(irq, (void *)(vector), 0)
51 #elif defined (KERNEL_RTT)
52 #include "ca/irq_ctrl.h"
53 #include "gic.h"
54 #include "interrupt.h"
55 #define NVIC_SetPriority(irq, prio)         arm_gic_set_priority(irq, (prio))
56 #define NVIC_ClearPendingIRQ(irq)           arm_gic_clear_pending(0, irq)
57 #define NVIC_EnableIRQ(irq)                 rt_hw_interrupt_umask(irq)
58 #define NVIC_DisableIRQ(irq)                rt_hw_interrupt_mask(irq)
59 #define NVIC_GetActive(irq)                 (rt_hw_interrupt_get_irq() == (irq))
60 //#define NVIC_GetActive(irq)                 (((GICDistributor->ISACTIVER[(irq) / 32U])  >> ((irq) % 32U)) & 1UL)
61 #define NVIC_SetVector(irq, vector)         rt_hw_interrupt_install(irq, (rt_isr_handler_t)vector, NULL, NULL)
62 #elif defined (KERNEL_LITEOS_A)
63 //#include "los_hwi.h"
64 #include "ca/irq_ctrl.h"
65 typedef void (*LOS_ISR)(void);
66 extern void HalIrqClear(unsigned int vector);
67 extern void HalIrqUnmask(unsigned int vector);
68 extern void HalIrqMask(unsigned int vector);
69 extern unsigned int HalCurIrqGet(void);
70 extern unsigned int hal_irq_create(unsigned int hwiNum,
71                                            unsigned short hwiPrio,
72                                            unsigned short hwiMode,
73                                            LOS_ISR hwiHandler,
74                                            void *irqParam);
75 #define NVIC_SetPriority(irq, prio)         GIC_SetPriority(irq, ((prio) << (8 - __GIC_PRIO_BITS)))
76 #define NVIC_ClearPendingIRQ(irq)           HalIrqClear(irq)
77 #define NVIC_EnableIRQ(irq)                 HalIrqUnmask(irq)
78 #define NVIC_DisableIRQ(irq)                HalIrqMask(irq)
79 #define NVIC_GetActive(irq)                 (HalCurIrqGet() == (irq))
80 #define NVIC_SetVector(irq, vector)         hal_irq_create(irq, 0xa0U, 0, (LOS_ISR)vector, 0)
81 #elif defined(KERNEL_NUTTX)
82 #include "ca/irq_ctrl.h"
83 extern void up_enable_irq(int irq);
84 extern void up_disable_irq(int irq);
85 extern int up_prioritize_irq(int irq, int priority);
86 typedef CODE int (*xcpt_t)(int irq, void *context, void *arg);
87 extern int irq_attach(int irq, xcpt_t isr, void *arg);
88 #define NVIC_SetPriority(irq, prio)         up_prioritize_irq(irq, (prio))
89 #define NVIC_ClearPendingIRQ(irq)           GIC_ClearPendingIRQ(irq)
90 #define NVIC_EnableIRQ(irq)                 up_enable_irq(irq)
91 #define NVIC_DisableIRQ(irq)                up_disable_irq(irq)
92 #define NVIC_GetActive(irq)                 (((GICDistributor->ISACTIVER[(irq) / 32U])  >> ((irq) % 32U)) & 1UL)
93 #define NVIC_SetVector(irq, vector)         irq_attach(irq, (void *)(vector), 0)
94 #else
95 #define NVIC_SetPriority(irq, prio)         GIC_SetPriority(irq, ((prio) << (8 - __GIC_PRIO_BITS)))
96 #define NVIC_ClearPendingIRQ(irq)           GIC_ClearPendingIRQ(irq)
97 #define NVIC_EnableIRQ(irq)                 GIC_EnableIRQ(irq)
98 #define NVIC_DisableIRQ(irq)                GIC_DisableIRQ(irq)
99 //#define NVIC_GetActive(irq)                 (GIC_GetIRQStatus(irq) & (1 << 1))
100 #define NVIC_GetActive(irq)                 (((GICDistributor->ISACTIVER[(irq) / 32U])  >> ((irq) % 32U)) & 1UL)
101 
102 #include "ca/irq_ctrl.h"
103 #define NVIC_SetVector(irq, vector)         IRQ_SetHandler(irq, (IRQHandler_t)(vector))
104 #endif
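/*
 * Usage sketch (illustrative, not part of the original CMSIS sources): whichever kernel
 * mapping above is selected, the NVIC_* macros are meant to be used the same way as on
 * Cortex-M devices. The interrupt number and handler name below are placeholders.
 *
 *   void MyDevice_Handler(void);
 *
 *   NVIC_SetVector(MyDevice_IRQn, (uint32_t)MyDevice_Handler);   // install the handler
 *   NVIC_SetPriority(MyDevice_IRQn, 1U);                         // set its priority
 *   NVIC_EnableIRQ(MyDevice_IRQn);                               // unmask the interrupt
 */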
105 
106 uint32_t __get_SPSR(void);
107 void __set_SPSR(uint32_t spsr);
108 #endif
109 
110 /*******************************************************************************
111  *                 CMSIS definitions
112  ******************************************************************************/
113 
114 /*  CMSIS CA definitions */
115 #define __CA_CMSIS_VERSION_MAIN  (1U)                                      /*!< \brief [31:16] CMSIS-Core(A) main version   */
116 #define __CA_CMSIS_VERSION_SUB   (1U)                                      /*!< \brief [15:0]  CMSIS-Core(A) sub version    */
117 #define __CA_CMSIS_VERSION       ((__CA_CMSIS_VERSION_MAIN << 16U) | \
118                                    __CA_CMSIS_VERSION_SUB          )       /*!< \brief CMSIS-Core(A) version number         */
119 
120 #if defined ( __CC_ARM )
121   #if defined __TARGET_FPU_VFP
122     #if (__FPU_PRESENT == 1)
123       #define __FPU_USED       1U
124     #else
125       #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
126       #define __FPU_USED       0U
127     #endif
128   #else
129     #define __FPU_USED         0U
130   #endif
131 
132 #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
133   #if defined __ARM_FP
134     #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
135       #define __FPU_USED       1U
136     #else
137       #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
138       #define __FPU_USED       0U
139     #endif
140   #else
141     #define __FPU_USED         0U
142   #endif
143 
144 #elif defined ( __ICCARM__ )
145   #if defined __ARMVFP__
146     #if (__FPU_PRESENT == 1)
147       #define __FPU_USED       1U
148     #else
149       #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
150       #define __FPU_USED       0U
151     #endif
152   #else
153     #define __FPU_USED         0U
154   #endif
155 
156 #elif defined ( __TMS470__ )
157   #if defined __TI_VFP_SUPPORT__
158     #if (__FPU_PRESENT == 1)
159       #define __FPU_USED       1U
160     #else
161       #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
162       #define __FPU_USED       0U
163     #endif
164   #else
165     #define __FPU_USED         0U
166   #endif
167 
168 #elif defined ( __GNUC__ )
169   #if defined (__VFP_FP__) && !defined(__SOFTFP__)
170     #if (__FPU_PRESENT == 1)
171       #define __FPU_USED       1U
172     #else
173       #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
174       #define __FPU_USED       0U
175     #endif
176   #else
177     #define __FPU_USED         0U
178   #endif
179 
180 #elif defined ( __TASKING__ )
181   #if defined __FPU_VFP__
182     #if (__FPU_PRESENT == 1)
183       #define __FPU_USED       1U
184     #else
185       #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
186       #define __FPU_USED       0U
187     #endif
188   #else
189     #define __FPU_USED         0U
190   #endif
191 #endif
192 
193 #ifndef __ASSEMBLER__
194 #include "ca/cmsis_compiler_ca.h"               /* CMSIS compiler specific defines */
195 #endif
196 
197 #ifdef __cplusplus
198 }
199 #endif
200 
201 #endif /* __CORE_CA_H_GENERIC */
202 
203 #ifndef __CMSIS_GENERIC
204 
205 #ifndef __CORE_CA_H_DEPENDANT
206 #define __CORE_CA_H_DEPENDANT
207 
208 #ifdef __cplusplus
209  extern "C" {
210 #endif
211 
212  /* check device defines and use defaults */
213 #if defined __CHECK_DEVICE_DEFINES
214   #ifndef __CA_REV
215     #define __CA_REV              0x0000U
216     #warning "__CA_REV not defined in device header file; using default!"
217   #endif
218 
219   #ifndef __FPU_PRESENT
220     #define __FPU_PRESENT             0U
221     #warning "__FPU_PRESENT not defined in device header file; using default!"
222   #endif
223 
224   #ifndef __GIC_PRESENT
225     #define __GIC_PRESENT             1U
226     #warning "__GIC_PRESENT not defined in device header file; using default!"
227   #endif
228 
229   #ifndef __TIM_PRESENT
230     #define __TIM_PRESENT             1U
231     #warning "__TIM_PRESENT not defined in device header file; using default!"
232   #endif
233 
234   #ifndef __L2C_PRESENT
235     #define __L2C_PRESENT             0U
236     #warning "__L2C_PRESENT not defined in device header file; using default!"
237   #endif
238 #endif
239 
240 /* IO definitions (access restrictions to peripheral registers) */
241 #ifdef __cplusplus
242   #define   __I     volatile             /*!< \brief Defines 'read only' permissions */
243 #else
244   #define   __I     volatile const       /*!< \brief Defines 'read only' permissions */
245 #endif
246 #define     __O     volatile             /*!< \brief Defines 'write only' permissions */
247 #define     __IO    volatile             /*!< \brief Defines 'read / write' permissions */
248 
249 /* following defines should be used for structure members */
250 #define     __IM     volatile const      /*!< \brief Defines 'read only' structure member permissions */
251 #define     __OM     volatile            /*!< \brief Defines 'write only' structure member permissions */
252 #define     __IOM    volatile            /*!< \brief Defines 'read / write' structure member permissions */
253 #define RESERVED(N, T) T RESERVED##N;    // placeholder struct members used for "reserved" areas
254 
255 #ifndef __ASSEMBLER__
256 
257  /*******************************************************************************
258   *                 Register Abstraction
259    Core Registers contain:
260    - CPSR
261    - CP15 Registers
262    - L2C-310 Cache Controller
263    - Generic Interrupt Controller Distributor
264    - Generic Interrupt Controller Interface
265   ******************************************************************************/
266 
267 /* Core Register CPSR */
268 typedef union
269 {
270   struct
271   {
272     uint32_t M:5;                        /*!< \brief bit:  0.. 4  Mode field */
273     uint32_t T:1;                        /*!< \brief bit:      5  Thumb execution state bit */
274     uint32_t F:1;                        /*!< \brief bit:      6  FIQ mask bit */
275     uint32_t I:1;                        /*!< \brief bit:      7  IRQ mask bit */
276     uint32_t A:1;                        /*!< \brief bit:      8  Asynchronous abort mask bit */
277     uint32_t E:1;                        /*!< \brief bit:      9  Endianness execution state bit */
278     uint32_t IT1:6;                      /*!< \brief bit: 10..15  If-Then execution state bits 2-7 */
279     uint32_t GE:4;                       /*!< \brief bit: 16..19  Greater than or Equal flags */
280     RESERVED(0:4, uint32_t)
281     uint32_t J:1;                        /*!< \brief bit:     24  Jazelle bit */
282     uint32_t IT0:2;                      /*!< \brief bit: 25..26  If-Then execution state bits 0-1 */
283     uint32_t Q:1;                        /*!< \brief bit:     27  Saturation condition flag */
284     uint32_t V:1;                        /*!< \brief bit:     28  Overflow condition code flag */
285     uint32_t C:1;                        /*!< \brief bit:     29  Carry condition code flag */
286     uint32_t Z:1;                        /*!< \brief bit:     30  Zero condition code flag */
287     uint32_t N:1;                        /*!< \brief bit:     31  Negative condition code flag */
288   } b;                                   /*!< \brief Structure used for bit  access */
289   uint32_t w;                            /*!< \brief Type      used for word access */
290 } CPSR_Type;
291 
292 
293 
294 /* CPSR Register Definitions */
295 #define CPSR_N_Pos                       31U                                    /*!< \brief CPSR: N Position */
296 #define CPSR_N_Msk                       (1UL << CPSR_N_Pos)                    /*!< \brief CPSR: N Mask */
297 
298 #define CPSR_Z_Pos                       30U                                    /*!< \brief CPSR: Z Position */
299 #define CPSR_Z_Msk                       (1UL << CPSR_Z_Pos)                    /*!< \brief CPSR: Z Mask */
300 
301 #define CPSR_C_Pos                       29U                                    /*!< \brief CPSR: C Position */
302 #define CPSR_C_Msk                       (1UL << CPSR_C_Pos)                    /*!< \brief CPSR: C Mask */
303 
304 #define CPSR_V_Pos                       28U                                    /*!< \brief CPSR: V Position */
305 #define CPSR_V_Msk                       (1UL << CPSR_V_Pos)                    /*!< \brief CPSR: V Mask */
306 
307 #define CPSR_Q_Pos                       27U                                    /*!< \brief CPSR: Q Position */
308 #define CPSR_Q_Msk                       (1UL << CPSR_Q_Pos)                    /*!< \brief CPSR: Q Mask */
309 
310 #define CPSR_IT0_Pos                     25U                                    /*!< \brief CPSR: IT0 Position */
311 #define CPSR_IT0_Msk                     (3UL << CPSR_IT0_Pos)                  /*!< \brief CPSR: IT0 Mask */
312 
313 #define CPSR_J_Pos                       24U                                    /*!< \brief CPSR: J Position */
314 #define CPSR_J_Msk                       (1UL << CPSR_J_Pos)                    /*!< \brief CPSR: J Mask */
315 
316 #define CPSR_GE_Pos                      16U                                    /*!< \brief CPSR: GE Position */
317 #define CPSR_GE_Msk                      (0xFUL << CPSR_GE_Pos)                 /*!< \brief CPSR: GE Mask */
318 
319 #define CPSR_IT1_Pos                     10U                                    /*!< \brief CPSR: IT1 Position */
320 #define CPSR_IT1_Msk                     (0x3FUL << CPSR_IT1_Pos)               /*!< \brief CPSR: IT1 Mask */
321 
322 #define CPSR_E_Pos                       9U                                     /*!< \brief CPSR: E Position */
323 #define CPSR_E_Msk                       (1UL << CPSR_E_Pos)                    /*!< \brief CPSR: E Mask */
324 
325 #define CPSR_A_Pos                       8U                                     /*!< \brief CPSR: A Position */
326 #define CPSR_A_Msk                       (1UL << CPSR_A_Pos)                    /*!< \brief CPSR: A Mask */
327 
328 #define CPSR_I_Pos                       7U                                     /*!< \brief CPSR: I Position */
329 #define CPSR_I_Msk                       (1UL << CPSR_I_Pos)                    /*!< \brief CPSR: I Mask */
330 
331 #define CPSR_F_Pos                       6U                                     /*!< \brief CPSR: F Position */
332 #define CPSR_F_Msk                       (1UL << CPSR_F_Pos)                    /*!< \brief CPSR: F Mask */
333 
334 #define CPSR_T_Pos                       5U                                     /*!< \brief CPSR: T Position */
335 #define CPSR_T_Msk                       (1UL << CPSR_T_Pos)                    /*!< \brief CPSR: T Mask */
336 
337 #define CPSR_M_Pos                       0U                                     /*!< \brief CPSR: M Position */
338 #define CPSR_M_Msk                       (0x1FUL << CPSR_M_Pos)                 /*!< \brief CPSR: M Mask */
339 
340 #define CPSR_M_USR                       0x10U                                  /*!< \brief CPSR: M User mode (PL0) */
341 #define CPSR_M_FIQ                       0x11U                                  /*!< \brief CPSR: M Fast Interrupt mode (PL1) */
342 #define CPSR_M_IRQ                       0x12U                                  /*!< \brief CPSR: M Interrupt mode (PL1) */
343 #define CPSR_M_SVC                       0x13U                                  /*!< \brief CPSR: M Supervisor mode (PL1) */
344 #define CPSR_M_MON                       0x16U                                  /*!< \brief CPSR: M Monitor mode (PL1) */
345 #define CPSR_M_ABT                       0x17U                                  /*!< \brief CPSR: M Abort mode (PL1) */
346 #define CPSR_M_HYP                       0x1AU                                  /*!< \brief CPSR: M Hypervisor mode (PL2) */
347 #define CPSR_M_UND                       0x1BU                                  /*!< \brief CPSR: M Undefined mode (PL1) */
348 #define CPSR_M_SYS                       0x1FU                                  /*!< \brief CPSR: M System mode (PL1) */
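/*
 * Illustrative sketch (not part of the original CMSIS sources): reading the current
 * processor mode through the CPSR_Type union, assuming the __get_CPSR() intrinsic from
 * the CMSIS compiler headers is available.
 *
 *   CPSR_Type cpsr;
 *   cpsr.w = __get_CPSR();
 *   if (cpsr.b.M == CPSR_M_SVC)
 *   {
 *     // running in Supervisor mode (PL1)
 *   }
 */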
349 
350 /* CP15 Register SCTLR */
351 typedef union
352 {
353   struct
354   {
355     uint32_t M:1;                        /*!< \brief bit:     0  MMU enable */
356     uint32_t A:1;                        /*!< \brief bit:     1  Alignment check enable */
357     uint32_t C:1;                        /*!< \brief bit:     2  Cache enable */
358     RESERVED(0:2, uint32_t)
359     uint32_t CP15BEN:1;                  /*!< \brief bit:     5  CP15 barrier enable */
360     RESERVED(1:1, uint32_t)
361     uint32_t B:1;                        /*!< \brief bit:     7  Endianness model */
362     RESERVED(2:2, uint32_t)
363     uint32_t SW:1;                       /*!< \brief bit:    10  SWP and SWPB enable */
364     uint32_t Z:1;                        /*!< \brief bit:    11  Branch prediction enable */
365     uint32_t I:1;                        /*!< \brief bit:    12  Instruction cache enable */
366     uint32_t V:1;                        /*!< \brief bit:    13  Vectors bit */
367     uint32_t RR:1;                       /*!< \brief bit:    14  Round Robin select */
368     RESERVED(3:2, uint32_t)
369     uint32_t HA:1;                       /*!< \brief bit:    17  Hardware Access flag enable */
370     RESERVED(4:1, uint32_t)
371     uint32_t WXN:1;                      /*!< \brief bit:    19  Write permission implies XN */
372     uint32_t UWXN:1;                     /*!< \brief bit:    20  Unprivileged write permission implies PL1 XN */
373     uint32_t FI:1;                       /*!< \brief bit:    21  Fast interrupts configuration enable */
374     uint32_t U:1;                        /*!< \brief bit:    22  Alignment model */
375     RESERVED(5:1, uint32_t)
376     uint32_t VE:1;                       /*!< \brief bit:    24  Interrupt Vectors Enable */
377     uint32_t EE:1;                       /*!< \brief bit:    25  Exception Endianness */
378     RESERVED(6:1, uint32_t)
379     uint32_t NMFI:1;                     /*!< \brief bit:    27  Non-maskable FIQ (NMFI) support */
380     uint32_t TRE:1;                      /*!< \brief bit:    28  TEX remap enable. */
381     uint32_t AFE:1;                      /*!< \brief bit:    29  Access flag enable */
382     uint32_t TE:1;                       /*!< \brief bit:    30  Thumb Exception enable */
383     RESERVED(7:1, uint32_t)
384   } b;                                   /*!< \brief Structure used for bit  access */
385   uint32_t w;                            /*!< \brief Type      used for word access */
386 } SCTLR_Type;
387 
388 #define SCTLR_TE_Pos                     30U                                    /*!< \brief SCTLR: TE Position */
389 #define SCTLR_TE_Msk                     (1UL << SCTLR_TE_Pos)                  /*!< \brief SCTLR: TE Mask */
390 
391 #define SCTLR_AFE_Pos                    29U                                    /*!< \brief SCTLR: AFE Position */
392 #define SCTLR_AFE_Msk                    (1UL << SCTLR_AFE_Pos)                 /*!< \brief SCTLR: AFE Mask */
393 
394 #define SCTLR_TRE_Pos                    28U                                    /*!< \brief SCTLR: TRE Position */
395 #define SCTLR_TRE_Msk                    (1UL << SCTLR_TRE_Pos)                 /*!< \brief SCTLR: TRE Mask */
396 
397 #define SCTLR_NMFI_Pos                   27U                                    /*!< \brief SCTLR: NMFI Position */
398 #define SCTLR_NMFI_Msk                   (1UL << SCTLR_NMFI_Pos)                /*!< \brief SCTLR: NMFI Mask */
399 
400 #define SCTLR_EE_Pos                     25U                                    /*!< \brief SCTLR: EE Position */
401 #define SCTLR_EE_Msk                     (1UL << SCTLR_EE_Pos)                  /*!< \brief SCTLR: EE Mask */
402 
403 #define SCTLR_VE_Pos                     24U                                    /*!< \brief SCTLR: VE Position */
404 #define SCTLR_VE_Msk                     (1UL << SCTLR_VE_Pos)                  /*!< \brief SCTLR: VE Mask */
405 
406 #define SCTLR_U_Pos                      22U                                    /*!< \brief SCTLR: U Position */
407 #define SCTLR_U_Msk                      (1UL << SCTLR_U_Pos)                   /*!< \brief SCTLR: U Mask */
408 
409 #define SCTLR_FI_Pos                     21U                                    /*!< \brief SCTLR: FI Position */
410 #define SCTLR_FI_Msk                     (1UL << SCTLR_FI_Pos)                  /*!< \brief SCTLR: FI Mask */
411 
412 #define SCTLR_UWXN_Pos                   20U                                    /*!< \brief SCTLR: UWXN Position */
413 #define SCTLR_UWXN_Msk                   (1UL << SCTLR_UWXN_Pos)                /*!< \brief SCTLR: UWXN Mask */
414 
415 #define SCTLR_WXN_Pos                    19U                                    /*!< \brief SCTLR: WXN Position */
416 #define SCTLR_WXN_Msk                    (1UL << SCTLR_WXN_Pos)                 /*!< \brief SCTLR: WXN Mask */
417 
418 #define SCTLR_HA_Pos                     17U                                    /*!< \brief SCTLR: HA Position */
419 #define SCTLR_HA_Msk                     (1UL << SCTLR_HA_Pos)                  /*!< \brief SCTLR: HA Mask */
420 
421 #define SCTLR_RR_Pos                     14U                                    /*!< \brief SCTLR: RR Position */
422 #define SCTLR_RR_Msk                     (1UL << SCTLR_RR_Pos)                  /*!< \brief SCTLR: RR Mask */
423 
424 #define SCTLR_V_Pos                      13U                                    /*!< \brief SCTLR: V Position */
425 #define SCTLR_V_Msk                      (1UL << SCTLR_V_Pos)                   /*!< \brief SCTLR: V Mask */
426 
427 #define SCTLR_I_Pos                      12U                                    /*!< \brief SCTLR: I Position */
428 #define SCTLR_I_Msk                      (1UL << SCTLR_I_Pos)                   /*!< \brief SCTLR: I Mask */
429 
430 #define SCTLR_Z_Pos                      11U                                    /*!< \brief SCTLR: Z Position */
431 #define SCTLR_Z_Msk                      (1UL << SCTLR_Z_Pos)                   /*!< \brief SCTLR: Z Mask */
432 
433 #define SCTLR_SW_Pos                     10U                                    /*!< \brief SCTLR: SW Position */
434 #define SCTLR_SW_Msk                     (1UL << SCTLR_SW_Pos)                  /*!< \brief SCTLR: SW Mask */
435 
436 #define SCTLR_B_Pos                      7U                                     /*!< \brief SCTLR: B Position */
437 #define SCTLR_B_Msk                      (1UL << SCTLR_B_Pos)                   /*!< \brief SCTLR: B Mask */
438 
439 #define SCTLR_CP15BEN_Pos                5U                                     /*!< \brief SCTLR: CP15BEN Position */
440 #define SCTLR_CP15BEN_Msk                (1UL << SCTLR_CP15BEN_Pos)             /*!< \brief SCTLR: CP15BEN Mask */
441 
442 #define SCTLR_C_Pos                      2U                                     /*!< \brief SCTLR: C Position */
443 #define SCTLR_C_Msk                      (1UL << SCTLR_C_Pos)                   /*!< \brief SCTLR: C Mask */
444 
445 #define SCTLR_A_Pos                      1U                                     /*!< \brief SCTLR: A Position */
446 #define SCTLR_A_Msk                      (1UL << SCTLR_A_Pos)                   /*!< \brief SCTLR: A Mask */
447 
448 #define SCTLR_M_Pos                      0U                                     /*!< \brief SCTLR: M Position */
449 #define SCTLR_M_Msk                      (1UL << SCTLR_M_Pos)                   /*!< \brief SCTLR: M Mask */
450 
451 /* CP15 Register ACTLR */
452 typedef union
453 {
454 #if __CORTEX_A == 5 || defined(DOXYGEN)
455   /** \brief Structure used for bit access on Cortex-A5 */
456   struct
457   {
458     uint32_t FW:1;                      /*!< \brief bit:      0  Cache and TLB maintenance broadcast */
459     RESERVED(0:5, uint32_t)
460     uint32_t SMP:1;                      /*!< \brief bit:     6  Enables coherent requests to the processor */
461     uint32_t EXCL:1;                     /*!< \brief bit:     7  Exclusive L1/L2 cache control */
462     RESERVED(1:2, uint32_t)
463     uint32_t DODMBS:1;                   /*!< \brief bit:    10  Disable optimized data memory barrier behavior */
464     uint32_t DWBST:1;                    /*!< \brief bit:    11  AXI data write bursts to Normal memory */
465     uint32_t RADIS:1;                    /*!< \brief bit:    12  L1 Data Cache read-allocate mode disable */
466     uint32_t L1PCTL:2;                   /*!< \brief bit:13..14  L1 Data prefetch control */
467     uint32_t BP:2;                       /*!< \brief bit:15..16  Branch prediction policy */
468     uint32_t RSDIS:1;                    /*!< \brief bit:    17  Disable return stack operation */
469     uint32_t BTDIS:1;                    /*!< \brief bit:    18  Disable indirect Branch Target Address Cache (BTAC) */
470     RESERVED(3:9, uint32_t)
471     uint32_t DBDI:1;                     /*!< \brief bit:    28  Disable branch dual issue */
472     RESERVED(7:3, uint32_t)
473  } b;
474 #endif
475 #if __CORTEX_A == 7 || defined(DOXYGEN)
476   /** \brief Structure used for bit access on Cortex-A7 */
477   struct
478   {
479     RESERVED(0:6, uint32_t)
480     uint32_t SMP:1;                      /*!< \brief bit:     6  Enables coherent requests to the processor */
481     RESERVED(1:3, uint32_t)
482     uint32_t DODMBS:1;                   /*!< \brief bit:    10  Disable optimized data memory barrier behavior */
483     uint32_t L2RADIS:1;                  /*!< \brief bit:    11  L2 Data Cache read-allocate mode disable */
484     uint32_t L1RADIS:1;                  /*!< \brief bit:    12  L1 Data Cache read-allocate mode disable */
485     uint32_t L1PCTL:2;                   /*!< \brief bit:13..14  L1 Data prefetch control */
486     uint32_t DDVM:1;                     /*!< \brief bit:    15  Disable Distributed Virtual Memory (DVM) transactions */
487     RESERVED(3:12, uint32_t)
488     uint32_t DDI:1;                      /*!< \brief bit:    28  Disable dual issue */
489     RESERVED(7:3, uint32_t)
490   } b;
491 #endif
492 #if __CORTEX_A == 9 || defined(DOXYGEN)
493   /** \brief Structure used for bit access on Cortex-A9 */
494   struct
495   {
496     uint32_t FW:1;                       /*!< \brief bit:     0  Cache and TLB maintenance broadcast */
497     RESERVED(0:1, uint32_t)
498     uint32_t L1PE:1;                     /*!< \brief bit:     2  Dside prefetch */
499     uint32_t WFLZM:1;                    /*!< \brief bit:     3  Enable write full line of zeros mode */
500     RESERVED(1:2, uint32_t)
501     uint32_t SMP:1;                      /*!< \brief bit:     6  Enables coherent requests to the processor */
502     uint32_t EXCL:1;                     /*!< \brief bit:     7  Exclusive L1/L2 cache control */
503     uint32_t AOW:1;                      /*!< \brief bit:     8  Enable allocation in one cache way only */
504     uint32_t PARITY:1;                   /*!< \brief bit:     9  Support for parity checking, if implemented */
505     RESERVED(7:22, uint32_t)
506   } b;
507 #endif
508   uint32_t w;                            /*!< \brief Type      used for word access */
509 } ACTLR_Type;
510 
511 #define ACTLR_DDI_Pos                    28U                                     /*!< \brief ACTLR: DDI Position */
512 #define ACTLR_DDI_Msk                    (1UL << ACTLR_DDI_Pos)                  /*!< \brief ACTLR: DDI Mask */
513 
514 #define ACTLR_DBDI_Pos                   28U                                     /*!< \brief ACTLR: DBDI Position */
515 #define ACTLR_DBDI_Msk                   (1UL << ACTLR_DBDI_Pos)                 /*!< \brief ACTLR: DBDI Mask */
516 
517 #define ACTLR_BTDIS_Pos                  18U                                     /*!< \brief ACTLR: BTDIS Position */
518 #define ACTLR_BTDIS_Msk                  (1UL << ACTLR_BTDIS_Pos)                /*!< \brief ACTLR: BTDIS Mask */
519 
520 #define ACTLR_RSDIS_Pos                  17U                                     /*!< \brief ACTLR: RSDIS Position */
521 #define ACTLR_RSDIS_Msk                  (1UL << ACTLR_RSDIS_Pos)                /*!< \brief ACTLR: RSDIS Mask */
522 
523 #define ACTLR_BP_Pos                     15U                                     /*!< \brief ACTLR: BP Position */
524 #define ACTLR_BP_Msk                     (3UL << ACTLR_BP_Pos)                   /*!< \brief ACTLR: BP Mask */
525 
526 #define ACTLR_DDVM_Pos                   15U                                     /*!< \brief ACTLR: DDVM Position */
527 #define ACTLR_DDVM_Msk                   (1UL << ACTLR_DDVM_Pos)                 /*!< \brief ACTLR: DDVM Mask */
528 
529 #define ACTLR_L1PCTL_Pos                 13U                                     /*!< \brief ACTLR: L1PCTL Position */
530 #define ACTLR_L1PCTL_Msk                 (3UL << ACTLR_L1PCTL_Pos)               /*!< \brief ACTLR: L1PCTL Mask */
531 
532 #define ACTLR_RADIS_Pos                  12U                                     /*!< \brief ACTLR: RADIS Position */
533 #define ACTLR_RADIS_Msk                  (1UL << ACTLR_RADIS_Pos)                /*!< \brief ACTLR: RADIS Mask */
534 
535 #define ACTLR_L1RADIS_Pos                12U                                     /*!< \brief ACTLR: L1RADIS Position */
536 #define ACTLR_L1RADIS_Msk                (1UL << ACTLR_L1RADIS_Pos)              /*!< \brief ACTLR: L1RADIS Mask */
537 
538 #define ACTLR_DWBST_Pos                  11U                                     /*!< \brief ACTLR: DWBST Position */
539 #define ACTLR_DWBST_Msk                  (1UL << ACTLR_DWBST_Pos)                /*!< \brief ACTLR: DWBST Mask */
540 
541 #define ACTLR_L2RADIS_Pos                11U                                     /*!< \brief ACTLR: L2RADIS Position */
542 #define ACTLR_L2RADIS_Msk                (1UL << ACTLR_L2RADIS_Pos)              /*!< \brief ACTLR: L2RADIS Mask */
543 
544 #define ACTLR_DODMBS_Pos                 10U                                     /*!< \brief ACTLR: DODMBS Position */
545 #define ACTLR_DODMBS_Msk                 (1UL << ACTLR_DODMBS_Pos)               /*!< \brief ACTLR: DODMBS Mask */
546 
547 #define ACTLR_PARITY_Pos                 9U                                      /*!< \brief ACTLR: PARITY Position */
548 #define ACTLR_PARITY_Msk                 (1UL << ACTLR_PARITY_Pos)               /*!< \brief ACTLR: PARITY Mask */
549 
550 #define ACTLR_AOW_Pos                    8U                                      /*!< \brief ACTLR: AOW Position */
551 #define ACTLR_AOW_Msk                    (1UL << ACTLR_AOW_Pos)                  /*!< \brief ACTLR: AOW Mask */
552 
553 #define ACTLR_EXCL_Pos                   7U                                      /*!< \brief ACTLR: EXCL Position */
554 #define ACTLR_EXCL_Msk                   (1UL << ACTLR_EXCL_Pos)                 /*!< \brief ACTLR: EXCL Mask */
555 
556 #define ACTLR_SMP_Pos                    6U                                      /*!< \brief ACTLR: SMP Position */
557 #define ACTLR_SMP_Msk                    (1UL << ACTLR_SMP_Pos)                  /*!< \brief ACTLR: SMP Mask */
558 
559 #define ACTLR_WFLZM_Pos                  3U                                      /*!< \brief ACTLR: WFLZM Position */
560 #define ACTLR_WFLZM_Msk                  (1UL << ACTLR_WFLZM_Pos)                /*!< \brief ACTLR: WFLZM Mask */
561 
562 #define ACTLR_L1PE_Pos                   2U                                      /*!< \brief ACTLR: L1PE Position */
563 #define ACTLR_L1PE_Msk                   (1UL << ACTLR_L1PE_Pos)                 /*!< \brief ACTLR: L1PE Mask */
564 
565 #define ACTLR_FW_Pos                     0U                                      /*!< \brief ACTLR: FW Position */
566 #define ACTLR_FW_Msk                     (1UL << ACTLR_FW_Pos)                   /*!< \brief ACTLR: FW Mask */
567 
568 /* CP15 Register CPACR */
569 typedef union
570 {
571   struct
572   {
573     uint32_t CP0:2;                      /*!< \brief bit:  0..1  Access rights for coprocessor 0 */
574     uint32_t CP1:2;                      /*!< \brief bit:  2..3  Access rights for coprocessor 1 */
575     uint32_t CP2:2;                      /*!< \brief bit:  4..5  Access rights for coprocessor 2 */
576     uint32_t CP3:2;                      /*!< \brief bit:  6..7  Access rights for coprocessor 3 */
577     uint32_t CP4:2;                      /*!< \brief bit:  8..9  Access rights for coprocessor 4 */
578     uint32_t CP5:2;                      /*!< \brief bit:10..11  Access rights for coprocessor 5 */
579     uint32_t CP6:2;                      /*!< \brief bit:12..13  Access rights for coprocessor 6 */
580     uint32_t CP7:2;                      /*!< \brief bit:14..15  Access rights for coprocessor 7 */
581     uint32_t CP8:2;                      /*!< \brief bit:16..17  Access rights for coprocessor 8 */
582     uint32_t CP9:2;                      /*!< \brief bit:18..19  Access rights for coprocessor 9 */
583     uint32_t CP10:2;                     /*!< \brief bit:20..21  Access rights for coprocessor 10 */
584     uint32_t CP11:2;                     /*!< \brief bit:22..23  Access rights for coprocessor 11 */
585     uint32_t CP12:2;                     /*!< \brief bit:24..25  Access rights for coprocessor 12 */
586     uint32_t CP13:2;                     /*!< \brief bit:26..27  Access rights for coprocessor 13 */
587     uint32_t TRCDIS:1;                   /*!< \brief bit:    28  Disable CP14 access to trace registers */
588     RESERVED(0:1, uint32_t)
589     uint32_t D32DIS:1;                   /*!< \brief bit:    30  Disable use of registers D16-D31 of the VFP register file */
590     uint32_t ASEDIS:1;                   /*!< \brief bit:    31  Disable Advanced SIMD Functionality */
591   } b;                                   /*!< \brief Structure used for bit  access */
592   uint32_t w;                            /*!< \brief Type      used for word access */
593 } CPACR_Type;
594 
595 #define CPACR_ASEDIS_Pos                 31U                                    /*!< \brief CPACR: ASEDIS Position */
596 #define CPACR_ASEDIS_Msk                 (1UL << CPACR_ASEDIS_Pos)              /*!< \brief CPACR: ASEDIS Mask */
597 
598 #define CPACR_D32DIS_Pos                 30U                                    /*!< \brief CPACR: D32DIS Position */
599 #define CPACR_D32DIS_Msk                 (1UL << CPACR_D32DIS_Pos)              /*!< \brief CPACR: D32DIS Mask */
600 
601 #define CPACR_TRCDIS_Pos                 28U                                    /*!< \brief CPACR: TRCDIS Position */
602 #define CPACR_TRCDIS_Msk                 (1UL << CPACR_TRCDIS_Pos)              /*!< \brief CPACR: TRCDIS Mask */
603 
604 #define CPACR_CP_Pos_(n)                 (n*2U)                                 /*!< \brief CPACR: CPn Position */
605 #define CPACR_CP_Msk_(n)                 (3UL << CPACR_CP_Pos_(n))              /*!< \brief CPACR: CPn Mask */
606 
607 #define CPACR_CP_NA                      0U                                     /*!< \brief CPACR CPn field: Access denied. */
608 #define CPACR_CP_PL1                     1U                                     /*!< \brief CPACR CPn field: Accessible from PL1 only. */
609 #define CPACR_CP_FA                      3U                                     /*!< \brief CPACR CPn field: Full access. */
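/*
 * Illustrative sketch (not part of the original CMSIS sources): granting full access to
 * the VFP/NEON coprocessors CP10 and CP11, assuming the __get_CPACR()/__set_CPACR()
 * CP15 helper intrinsics are available.
 *
 *   uint32_t cpacr = __get_CPACR();
 *   cpacr &= ~(CPACR_CP_Msk_(10U) | CPACR_CP_Msk_(11U));         // clear both fields
 *   cpacr |=  (CPACR_CP_FA << CPACR_CP_Pos_(10U)) |
 *             (CPACR_CP_FA << CPACR_CP_Pos_(11U));               // full access
 *   __set_CPACR(cpacr);
 *   __ISB();
 */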
610 
611 /* CP15 Register DFSR */
612 typedef union
613 {
614   struct
615   {
616     uint32_t FS0:4;                      /*!< \brief bit: 0.. 3  Fault Status bits bit 0-3 */
617     uint32_t Domain:4;                   /*!< \brief bit: 4.. 7  Fault on which domain */
618     RESERVED(0:1, uint32_t)
619     uint32_t LPAE:1;                     /*!< \brief bit:     9  Large Physical Address Extension */
620     uint32_t FS1:1;                      /*!< \brief bit:    10  Fault Status bits bit 4 */
621     uint32_t WnR:1;                      /*!< \brief bit:    11  Write not Read bit */
622     uint32_t ExT:1;                      /*!< \brief bit:    12  External abort type */
623     uint32_t CM:1;                       /*!< \brief bit:    13  Cache maintenance fault */
624     RESERVED(1:18, uint32_t)
625   } s;                                   /*!< \brief Structure used for bit  access in short format */
626   struct
627   {
628     uint32_t STATUS:6;                   /*!< \brief bit: 0.. 5  Fault Status bits */
629     RESERVED(0:3, uint32_t)
630     uint32_t LPAE:1;                     /*!< \brief bit:     9  Large Physical Address Extension */
631     RESERVED(1:1, uint32_t)
632     uint32_t WnR:1;                      /*!< \brief bit:    11  Write not Read bit */
633     uint32_t ExT:1;                      /*!< \brief bit:    12  External abort type */
634     uint32_t CM:1;                       /*!< \brief bit:    13  Cache maintenance fault */
635     RESERVED(2:18, uint32_t)
636   } l;                                   /*!< \brief Structure used for bit  access in long format */
637   uint32_t w;                            /*!< \brief Type      used for word access */
638 } DFSR_Type;
639 
640 #define DFSR_CM_Pos                      13U                                    /*!< \brief DFSR: CM Position */
641 #define DFSR_CM_Msk                      (1UL << DFSR_CM_Pos)                   /*!< \brief DFSR: CM Mask */
642 
643 #define DFSR_Ext_Pos                     12U                                    /*!< \brief DFSR: Ext Position */
644 #define DFSR_Ext_Msk                     (1UL << DFSR_Ext_Pos)                  /*!< \brief DFSR: Ext Mask */
645 
646 #define DFSR_WnR_Pos                     11U                                    /*!< \brief DFSR: WnR Position */
647 #define DFSR_WnR_Msk                     (1UL << DFSR_WnR_Pos)                  /*!< \brief DFSR: WnR Mask */
648 
649 #define DFSR_FS1_Pos                     10U                                    /*!< \brief DFSR: FS1 Position */
650 #define DFSR_FS1_Msk                     (1UL << DFSR_FS1_Pos)                  /*!< \brief DFSR: FS1 Mask */
651 
652 #define DFSR_LPAE_Pos                    9U                                    /*!< \brief DFSR: LPAE Position */
653 #define DFSR_LPAE_Msk                    (1UL << DFSR_LPAE_Pos)                /*!< \brief DFSR: LPAE Mask */
654 
655 #define DFSR_Domain_Pos                  4U                                     /*!< \brief DFSR: Domain Position */
656 #define DFSR_Domain_Msk                  (0xFUL << DFSR_Domain_Pos)             /*!< \brief DFSR: Domain Mask */
657 
658 #define DFSR_FS0_Pos                     0U                                     /*!< \brief DFSR: FS0 Position */
659 #define DFSR_FS0_Msk                     (0xFUL << DFSR_FS0_Pos)                /*!< \brief DFSR: FS0 Mask */
660 
661 #define DFSR_STATUS_Pos                  0U                                     /*!< \brief DFSR: STATUS Position */
662 #define DFSR_STATUS_Msk                  (0x3FUL << DFSR_STATUS_Pos)            /*!< \brief DFSR: STATUS Mask */
663 
664 /* CP15 Register IFSR */
665 typedef union
666 {
667   struct
668   {
669     uint32_t FS0:4;                      /*!< \brief bit: 0.. 3  Fault Status bits bit 0-3 */
670     RESERVED(0:5, uint32_t)
671     uint32_t LPAE:1;                     /*!< \brief bit:     9  Large Physical Address Extension */
672     uint32_t FS1:1;                      /*!< \brief bit:    10  Fault Status bits bit 4 */
673     RESERVED(1:1, uint32_t)
674     uint32_t ExT:1;                      /*!< \brief bit:    12  External abort type */
675     RESERVED(2:19, uint32_t)
676   } s;                                   /*!< \brief Structure used for bit access in short format */
677   struct
678   {
679     uint32_t STATUS:6;                   /*!< \brief bit: 0.. 5  Fault Status bits */
680     RESERVED(0:3, uint32_t)
681     uint32_t LPAE:1;                     /*!< \brief bit:     9  Large Physical Address Extension */
682     RESERVED(1:2, uint32_t)
683     uint32_t ExT:1;                      /*!< \brief bit:    12  External abort type */
684     RESERVED(2:19, uint32_t)
685   } l;                                   /*!< \brief Structure used for bit access in long format */
686   uint32_t w;                            /*!< \brief Type      used for word access */
687 } IFSR_Type;
688 
689 #define IFSR_ExT_Pos                     12U                                    /*!< \brief IFSR: ExT Position */
690 #define IFSR_ExT_Msk                     (1UL << IFSR_ExT_Pos)                  /*!< \brief IFSR: ExT Mask */
691 
692 #define IFSR_FS1_Pos                     10U                                    /*!< \brief IFSR: FS1 Position */
693 #define IFSR_FS1_Msk                     (1UL << IFSR_FS1_Pos)                  /*!< \brief IFSR: FS1 Mask */
694 
695 #define IFSR_LPAE_Pos                    9U                                     /*!< \brief IFSR: LPAE Position */
696 #define IFSR_LPAE_Msk                    (0x1UL << IFSR_LPAE_Pos)               /*!< \brief IFSR: LPAE Mask */
697 
698 #define IFSR_FS0_Pos                     0U                                     /*!< \brief IFSR: FS0 Position */
699 #define IFSR_FS0_Msk                     (0xFUL << IFSR_FS0_Pos)                /*!< \brief IFSR: FS0 Mask */
700 
701 #define IFSR_STATUS_Pos                  0U                                     /*!< \brief IFSR: STATUS Position */
702 #define IFSR_STATUS_Msk                  (0x3FUL << IFSR_STATUS_Pos)            /*!< \brief IFSR: STATUS Mask */
703 
704 /* CP15 Register ISR */
705 typedef union
706 {
707   struct
708   {
709     RESERVED(0:6, uint32_t)
710     uint32_t F:1;                        /*!< \brief bit:     6  FIQ pending bit */
711     uint32_t I:1;                        /*!< \brief bit:     7  IRQ pending bit */
712     uint32_t A:1;                        /*!< \brief bit:     8  External abort pending bit */
713     RESERVED(1:23, uint32_t)
714   } b;                                   /*!< \brief Structure used for bit  access */
715   uint32_t w;                            /*!< \brief Type      used for word access */
716 } ISR_Type;
717 
718 #define ISR_A_Pos                        8U                                     /*!< \brief ISR: A Position */
719 #define ISR_A_Msk                        (1UL << ISR_A_Pos)                     /*!< \brief ISR: A Mask */
720 
721 #define ISR_I_Pos                        7U                                     /*!< \brief ISR: I Position */
722 #define ISR_I_Msk                        (1UL << ISR_I_Pos)                     /*!< \brief ISR: I Mask */
723 
724 #define ISR_F_Pos                        6U                                     /*!< \brief ISR: F Position */
725 #define ISR_F_Msk                        (1UL << ISR_F_Pos)                     /*!< \brief ISR: F Mask */
726 
727 /* DACR Register */
728 #define DACR_D_Pos_(n)                   (2U*n)                                 /*!< \brief DACR: Dn Position */
729 #define DACR_D_Msk_(n)                   (3UL << DACR_D_Pos_(n))                /*!< \brief DACR: Dn Mask */
730 #define DACR_Dn_NOACCESS                 0U                                     /*!< \brief DACR Dn field: No access */
731 #define DACR_Dn_CLIENT                   1U                                     /*!< \brief DACR Dn field: Client */
732 #define DACR_Dn_MANAGER                  3U                                     /*!< \brief DACR Dn field: Manager */
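/*
 * Illustrative sketch (not part of the original CMSIS sources): configuring MMU domain 0
 * as a Client domain, assuming the __get_DACR()/__set_DACR() CP15 intrinsics are available.
 *
 *   uint32_t dacr = __get_DACR();
 *   dacr &= ~DACR_D_Msk_(0U);                      // clear the D0 field
 *   dacr |=  (DACR_Dn_CLIENT << DACR_D_Pos_(0U));  // accesses checked against permissions
 *   __set_DACR(dacr);
 */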
733 
734 /**
735   \brief     Mask and shift a bit field value for use in a register bit range.
736   \param [in] field  Name of the register bit field.
737   \param [in] value  Value of the bit field. This parameter is interpreted as an uint32_t type.
738   \return           Masked and shifted value.
739 */
740 #define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
741 
742 /**
743   \brief     Mask and shift a register value to extract a bit field value.
744   \param [in] field  Name of the register bit field.
745   \param [in] value  Value of register. This parameter is interpreted as an uint32_t type.
746   \return           Masked and shifted bit field value.
747 */
748 #define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
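/*
 * Illustrative sketch (not part of the original CMSIS sources): both helpers are meant to
 * be used with the *_Pos/*_Msk pairs defined in this file, for example the CPSR mode field.
 *
 *   uint32_t cpsr = __get_CPSR();
 *   uint32_t mode = _FLD2VAL(CPSR_M, cpsr);                         // extract M[4:0]
 *   cpsr = (cpsr & ~CPSR_M_Msk) | _VAL2FLD(CPSR_M, CPSR_M_SYS);     // build a new value
 */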
749 
750 
751 /**
752  \brief  Union type to access the L2C_310 Cache Controller.
753 */
754 #if (__L2C_PRESENT == 1U) || defined(DOXYGEN)
755 typedef struct
756 {
757   __IM  uint32_t CACHE_ID;                   /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register               */
758   __IM  uint32_t CACHE_TYPE;                 /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register             */
759         RESERVED(0[0x3e], uint32_t)
760   __IOM uint32_t CONTROL;                    /*!< \brief Offset: 0x0100 (R/W) Control Register                */
761   __IOM uint32_t AUX_CNT;                    /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control               */
762         RESERVED(1[0x3e], uint32_t)
763   __IOM uint32_t EVENT_CONTROL;              /*!< \brief Offset: 0x0200 (R/W) Event Counter Control           */
764   __IOM uint32_t EVENT_COUNTER1_CONF;        /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration   */
765   __IOM uint32_t EVENT_COUNTER0_CONF;        /*!< \brief Offset: 0x0208 (R/W) Event Counter 0 Configuration   */
766         RESERVED(2[0x2], uint32_t)
767   __IOM uint32_t INTERRUPT_MASK;             /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask                  */
768   __IM  uint32_t MASKED_INT_STATUS;          /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status         */
769   __IM  uint32_t RAW_INT_STATUS;             /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status            */
770   __OM  uint32_t INTERRUPT_CLEAR;            /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear                 */
771         RESERVED(3[0x143], uint32_t)
772   __IOM uint32_t CACHE_SYNC;                 /*!< \brief Offset: 0x0730 (R/W) Cache Sync                      */
773         RESERVED(4[0xf], uint32_t)
774   __IOM uint32_t INV_LINE_PA;                /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA           */
775         RESERVED(6[2], uint32_t)
776   __IOM uint32_t INV_WAY;                    /*!< \brief Offset: 0x077c (R/W) Invalidate by Way               */
777         RESERVED(5[0xc], uint32_t)
778   __IOM uint32_t CLEAN_LINE_PA;              /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA                */
779         RESERVED(7[1], uint32_t)
780   __IOM uint32_t CLEAN_LINE_INDEX_WAY;       /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way         */
781   __IOM uint32_t CLEAN_WAY;                  /*!< \brief Offset: 0x07bc (R/W) Clean by Way                    */
782         RESERVED(8[0xc], uint32_t)
783   __IOM uint32_t CLEAN_INV_LINE_PA;          /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA  */
784         RESERVED(9[1], uint32_t)
785   __IOM uint32_t CLEAN_INV_LINE_INDEX_WAY;   /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way  */
786   __IOM uint32_t CLEAN_INV_WAY;              /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way     */
787         RESERVED(10[0x40], uint32_t)
788   __IOM uint32_t DATA_LOCK_0_WAY;            /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way          */
789   __IOM uint32_t INST_LOCK_0_WAY;            /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way   */
790   __IOM uint32_t DATA_LOCK_1_WAY;            /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way          */
791   __IOM uint32_t INST_LOCK_1_WAY;            /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way   */
792   __IOM uint32_t DATA_LOCK_2_WAY;            /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way          */
793   __IOM uint32_t INST_LOCK_2_WAY;            /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way   */
794   __IOM uint32_t DATA_LOCK_3_WAY;            /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way          */
795   __IOM uint32_t INST_LOCK_3_WAY;            /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way   */
796   __IOM uint32_t DATA_LOCK_4_WAY;            /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way          */
797   __IOM uint32_t INST_LOCK_4_WAY;            /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way   */
798   __IOM uint32_t DATA_LOCK_5_WAY;            /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way          */
799   __IOM uint32_t INST_LOCK_5_WAY;            /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way   */
800   __IOM uint32_t DATA_LOCK_6_WAY;            /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 6 by Way          */
801   __IOM uint32_t INST_LOCK_6_WAY;            /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 6 by Way   */
802   __IOM uint32_t DATA_LOCK_7_WAY;            /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 7 by Way          */
803   __IOM uint32_t INST_LOCK_7_WAY;            /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 7 by Way   */
804         RESERVED(11[0x4], uint32_t)
805   __IOM uint32_t LOCK_LINE_EN;               /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable         */
806   __IOM uint32_t UNLOCK_ALL_BY_WAY;          /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way         */
807         RESERVED(12[0xaa], uint32_t)
808   __IOM uint32_t ADDRESS_FILTER_START;       /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start         */
809   __IOM uint32_t ADDRESS_FILTER_END;         /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End           */
810         RESERVED(13[0xce], uint32_t)
811   __IOM uint32_t DEBUG_CONTROL;              /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register          */
812 } L2C_310_TypeDef;
813 
814 #define L2C_310           ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */
815 #endif
816 
817 #if (__GIC_PRESENT == 1U) || defined(DOXYGEN)
818 
819 /** \brief  Structure type to access the Generic Interrupt Controller Distributor (GICD)
820 */
821 typedef struct
822 {
823   __IOM uint32_t CTLR;                 /*!< \brief  Offset: 0x000 (R/W) Distributor Control Register */
824   __IM  uint32_t TYPER;                /*!< \brief  Offset: 0x004 (R/ ) Interrupt Controller Type Register */
825   __IM  uint32_t IIDR;                 /*!< \brief  Offset: 0x008 (R/ ) Distributor Implementer Identification Register */
826         RESERVED(0, uint32_t)
827   __IOM uint32_t STATUSR;              /*!< \brief  Offset: 0x010 (R/W) Error Reporting Status Register, optional */
828         RESERVED(1[11], uint32_t)
829   __OM  uint32_t SETSPI_NSR;           /*!< \brief  Offset: 0x040 ( /W) Set SPI Register */
830         RESERVED(2, uint32_t)
831   __OM  uint32_t CLRSPI_NSR;           /*!< \brief  Offset: 0x048 ( /W) Clear SPI Register */
832         RESERVED(3, uint32_t)
833   __OM  uint32_t SETSPI_SR;            /*!< \brief  Offset: 0x050 ( /W) Set SPI, Secure Register */
834         RESERVED(4, uint32_t)
835   __OM  uint32_t CLRSPI_SR;            /*!< \brief  Offset: 0x058 ( /W) Clear SPI, Secure Register */
836         RESERVED(5[9], uint32_t)
837   __IOM uint32_t IGROUPR[32];          /*!< \brief  Offset: 0x080 (R/W) Interrupt Group Registers */
838   __IOM uint32_t ISENABLER[32];        /*!< \brief  Offset: 0x100 (R/W) Interrupt Set-Enable Registers */
839   __IOM uint32_t ICENABLER[32];        /*!< \brief  Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */
840   __IOM uint32_t ISPENDR[32];          /*!< \brief  Offset: 0x200 (R/W) Interrupt Set-Pending Registers */
841   __IOM uint32_t ICPENDR[32];          /*!< \brief  Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */
842   __IOM uint32_t ISACTIVER[32];        /*!< \brief  Offset: 0x300 (R/W) Interrupt Set-Active Registers */
843   __IOM uint32_t ICACTIVER[32];        /*!< \brief  Offset: 0x380 (R/W) Interrupt Clear-Active Registers */
844   __IOM uint32_t IPRIORITYR[255];      /*!< \brief  Offset: 0x400 (R/W) Interrupt Priority Registers */
845         RESERVED(6, uint32_t)
846   __IOM uint32_t  ITARGETSR[255];      /*!< \brief  Offset: 0x800 (R/W) Interrupt Targets Registers */
847         RESERVED(7, uint32_t)
848   __IOM uint32_t ICFGR[64];            /*!< \brief  Offset: 0xC00 (R/W) Interrupt Configuration Registers */
849   __IOM uint32_t IGRPMODR[32];         /*!< \brief  Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */
850         RESERVED(8[32], uint32_t)
851   __IOM uint32_t NSACR[64];            /*!< \brief  Offset: 0xE00 (R/W) Non-secure Access Control Registers */
852   __OM  uint32_t SGIR;                 /*!< \brief  Offset: 0xF00 ( /W) Software Generated Interrupt Register */
853         RESERVED(9[3], uint32_t)
854   __IOM uint32_t CPENDSGIR[4];         /*!< \brief  Offset: 0xF10 (R/W) SGI Clear-Pending Registers */
855   __IOM uint32_t SPENDSGIR[4];         /*!< \brief  Offset: 0xF20 (R/W) SGI Set-Pending Registers */
856         RESERVED(10[5236], uint32_t)
857   __IOM uint64_t IROUTER[988];         /*!< \brief  Offset: 0x6100(R/W) Interrupt Routing Registers */
858 }  GICDistributor_Type;
859 
860 #define GICDistributor      ((GICDistributor_Type      *)     GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */
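/*
 * Illustrative sketch (not part of the original CMSIS sources): the distributor banks its
 * per-interrupt state as 32 interrupts per word, so the pending state of an interrupt can
 * be read the same way the NVIC_GetActive() macro above reads ISACTIVER.
 *
 *   uint32_t irq     = 42U;                                               // placeholder ID
 *   uint32_t pending = (GICDistributor->ISPENDR[irq / 32U] >> (irq % 32U)) & 1UL;
 */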
861 
862 /** \brief  Structure type to access the Generic Interrupt Controller Interface (GICC)
863 */
864 typedef struct
865 {
866   __IOM uint32_t CTLR;                 /*!< \brief  Offset: 0x000 (R/W) CPU Interface Control Register */
867   __IOM uint32_t PMR;                  /*!< \brief  Offset: 0x004 (R/W) Interrupt Priority Mask Register */
868   __IOM uint32_t BPR;                  /*!< \brief  Offset: 0x008 (R/W) Binary Point Register */
869   __IM  uint32_t IAR;                  /*!< \brief  Offset: 0x00C (R/ ) Interrupt Acknowledge Register */
870   __OM  uint32_t EOIR;                 /*!< \brief  Offset: 0x010 ( /W) End Of Interrupt Register */
871   __IM  uint32_t RPR;                  /*!< \brief  Offset: 0x014 (R/ ) Running Priority Register */
872   __IM  uint32_t HPPIR;                /*!< \brief  Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */
873   __IOM uint32_t ABPR;                 /*!< \brief  Offset: 0x01C (R/W) Aliased Binary Point Register */
874   __IM  uint32_t AIAR;                 /*!< \brief  Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */
875   __OM  uint32_t AEOIR;                /*!< \brief  Offset: 0x024 ( /W) Aliased End Of Interrupt Register */
876   __IM  uint32_t AHPPIR;               /*!< \brief  Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */
877   __IOM uint32_t STATUSR;              /*!< \brief  Offset: 0x02C (R/W) Error Reporting Status Register, optional */
878         RESERVED(1[40], uint32_t)
879   __IOM uint32_t APR[4];               /*!< \brief  Offset: 0x0D0 (R/W) Active Priority Register */
880   __IOM uint32_t NSAPR[4];             /*!< \brief  Offset: 0x0E0 (R/W) Non-secure Active Priority Register */
881         RESERVED(2[3], uint32_t)
882   __IM  uint32_t IIDR;                 /*!< \brief  Offset: 0x0FC (R/ ) CPU Interface Identification Register */
883         RESERVED(3[960], uint32_t)
884   __OM  uint32_t DIR;                  /*!< \brief  Offset: 0x1000( /W) Deactivate Interrupt Register */
885 }  GICInterface_Type;
886 
887 #define GICInterface        ((GICInterface_Type        *)     GIC_INTERFACE_BASE )   /*!< \brief GIC Interface register set access pointer */
888 #endif
889 
890 #if (__TIM_PRESENT == 1U) || defined(DOXYGEN)
891 #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN)
892 /** \brief Structure type to access the Private Timer
893 */
894 typedef struct
895 {
896   __IOM uint32_t LOAD;            //!< \brief  Offset: 0x000 (R/W) Private Timer Load Register
897   __IOM uint32_t COUNTER;         //!< \brief  Offset: 0x004 (R/W) Private Timer Counter Register
898   __IOM uint32_t CONTROL;         //!< \brief  Offset: 0x008 (R/W) Private Timer Control Register
899   __IOM uint32_t ISR;             //!< \brief  Offset: 0x00C (R/W) Private Timer Interrupt Status Register
900         RESERVED(0[4], uint32_t)
901   __IOM uint32_t WLOAD;           //!< \brief  Offset: 0x020 (R/W) Watchdog Load Register
902   __IOM uint32_t WCOUNTER;        //!< \brief  Offset: 0x024 (R/W) Watchdog Counter Register
903   __IOM uint32_t WCONTROL;        //!< \brief  Offset: 0x028 (R/W) Watchdog Control Register
904   __IOM uint32_t WISR;            //!< \brief  Offset: 0x02C (R/W) Watchdog Interrupt Status Register
905   __IOM uint32_t WRESET;          //!< \brief  Offset: 0x030 (R/W) Watchdog Reset Status Register
906   __OM  uint32_t WDISABLE;        //!< \brief  Offset: 0x034 ( /W) Watchdog Disable Register
907 } Timer_Type;
908 #define PTIM ((Timer_Type *) TIMER_BASE )   /*!< \brief Timer register struct */
909 #endif
910 #endif
911 
912  /*******************************************************************************
913   *                Hardware Abstraction Layer
914    Core Function Interface contains:
915    - L1 Cache Functions
916    - L2C-310 Cache Controller Functions
917    - PL1 Timer Functions
918    - GIC Functions
919    - MMU Functions
920   ******************************************************************************/
921 
922 /* ##########################  L1 Cache functions  ################################# */
923 
924 /** \brief Enable Caches by setting I and C bits in SCTLR register.
925 */
926 __STATIC_FORCEINLINE void L1C_EnableCaches(void) {
927   __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk);
928   __ISB();
929 }
930 
931 /** \brief Disable Caches by clearing I and C bits in SCTLR register.
932 */
933 __STATIC_FORCEINLINE void L1C_DisableCaches(void) {
934   __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk));
935   __ISB();
936 }
937 
938 /** \brief  Enable Branch Prediction by setting Z bit in SCTLR register.
939 */
940 __STATIC_FORCEINLINE void L1C_EnableBTAC(void) {
941   __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk);
942   __ISB();
943 }
944 
945 /** \brief  Disable Branch Prediction by clearing Z bit in SCTLR register.
946 */
947 __STATIC_FORCEINLINE void L1C_DisableBTAC(void) {
948   __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk));
949   __ISB();
950 }
951 
952 /** \brief  Invalidate entire branch predictor array
953 */
954 __STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) {
955   __set_BPIALL(0);
956   __DSB();     //ensure completion of the invalidation
957   __ISB();     //ensure instruction fetch path sees new state
958 }
959 
960 /** \brief  Invalidate the whole instruction cache
961 */
962 __STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) {
963   __set_ICIALLU(0);
964   __DSB();     //ensure completion of the invalidation
965   __ISB();     //ensure instruction fetch path sees new I cache state
966 }
967 
968 /** \brief  Clean data cache line by address.
969 * \param [in] va Pointer to data to clear the cache for.
970 */
971 __STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) {
972   __set_DCCMVAC((uint32_t)va);
973   __DMB();     //ensure the ordering of data cache maintenance operations and their effects
974 }
975 
976 /** \brief  Invalidate data cache line by address.
977 * \param [in] va Pointer to data to invalidate the cache for.
978 */
979 __STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) {
980   __set_DCIMVAC((uint32_t)va);
981   __DMB();     //ensure the ordering of data cache maintenance operations and their effects
982 }
983 
984 /** \brief  Clean and Invalidate data cache by address.
985 * \param [in] va Pointer to data to invalidate the cache for.
986 */
987 __STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
988   __set_DCCIMVAC((uint32_t)va);
989   __DMB();     //ensure the ordering of data cache maintenance operations and their effects
990 }
991 
992 /** \brief Calculate log2 rounded up
993 *  - log(0)  => 0
994 *  - log(1)  => 0
995 *  - log(2)  => 1
996 *  - log(3)  => 2
997 *  - log(4)  => 2
998 *  - log(5)  => 3
999 *        :      :
1000 *  - log(16) => 4
1001 *  - log(32) => 5
1002 *        :      :
1003 * \param [in] n input value parameter
1004 * \return log2(n)
1005 */
1006 __STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n)
1007 {
1008   if (n < 2U) {
1009     return 0U;
1010   }
1011   uint8_t log = 0U;
1012   uint32_t t = n;
1013   while(t > 1U)
1014   {
1015     log++;
1016     t >>= 1U;
1017   }
1018   if ((n & (n - 1U)) != 0U) { log++; }   /* not a power of two -> round up */
1019   return log;
1020 }
1021 
1022 /** \brief  Apply cache maintenance to given cache level.
1023 * \param [in] level cache level to be maintained
1024 * \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean
1025 */
1026 __STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
1027 {
1028   uint32_t Dummy;
1029   uint32_t ccsidr;
1030   uint32_t num_sets;
1031   uint32_t num_ways;
1032   uint32_t shift_way;
1033   uint32_t log2_linesize;
1034    int32_t log2_num_ways;
1035 
1036   Dummy = level << 1U;
1037   /* set csselr, select ccsidr register */
1038   __set_CSSELR(Dummy);
1039   /* get current ccsidr register */
1040   ccsidr = __get_CCSIDR();
1041   num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U;
1042   num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U;
1043   log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U;
1044   log2_num_ways = __log2_up(num_ways);
1045   if ((log2_num_ways < 0) || (log2_num_ways > 32)) {
1046     return; // FATAL ERROR
1047   }
1048   shift_way = 32U - (uint32_t)log2_num_ways;
1049   for(int32_t way = num_ways-1; way >= 0; way--)
1050   {
1051     for(int32_t set = num_sets-1; set >= 0; set--)
1052     {
1053       Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way);
1054       switch (maint)
1055       {
1056         case 0U: __set_DCISW(Dummy);  break;
1057         case 1U: __set_DCCSW(Dummy);  break;
1058         default: __set_DCCISW(Dummy); break;
1059       }
1060     }
1061   }
1062   __DMB();
1063 }
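
/* Worked example (hypothetical cache geometry, not taken from this file): for a
 * 4-way cache with 32-byte lines, CCSIDR yields log2_linesize = 5 and
 * __log2_up(4) = 2, so shift_way = 30. The set/way operand written to
 * DCISW/DCCSW/DCCISW is then composed as sketched below.
 */
__STATIC_INLINE uint32_t L1C_Example_SetWayOperand(uint32_t level, uint32_t set, uint32_t way)
{
  return (level << 1U)    /* cache level in bits [3:1] */
       | (set   << 5U)    /* set index starts above the 32-byte line offset */
       | (way   << 30U);  /* way index in the top log2(ways) = 2 bits */
}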
1064 
1065 /** \brief  Clean and Invalidate the entire data or unified cache
1066 * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
1067 * \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
1068 */
1069 __STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) {
1070   uint32_t clidr;
1071   uint32_t cache_type;
1072   clidr =  __get_CLIDR();
1073   for(uint32_t i = 0U; i<7U; i++)
1074   {
1075     cache_type = (clidr >> i*3U) & 0x7UL;
1076     if ((cache_type >= 2U) && (cache_type <= 4U))
1077     {
1078       __L1C_MaintainDCacheSetWay(i, op);
1079     }
1080   }
1081 }
1082 
1083 /** \brief  Clean and Invalidate the entire data or unified cache
1084 * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
1085 * \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
1086 * \deprecated Use generic L1C_CleanInvalidateCache instead.
1087 */
1088 CMSIS_DEPRECATED
1089 __STATIC_FORCEINLINE void __L1C_CleanInvalidateCache(uint32_t op) {
1090   L1C_CleanInvalidateCache(op);
1091 }
1092 
1093 /** \brief  Invalidate the whole data cache.
1094 */
1095 __STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) {
1096   L1C_CleanInvalidateCache(0);
1097 }
1098 
1099 /** \brief  Clean the whole data cache.
1100  */
1101 __STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) {
1102   L1C_CleanInvalidateCache(1);
1103 }
1104 
1105 /** \brief  Clean and invalidate the whole data cache.
1106  */
1107 __STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) {
1108   L1C_CleanInvalidateCache(2);
1109 }
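
/* Illustrative usage sketch (the helper name is hypothetical, not part of the
 * CMSIS API): a typical boot-time sequence invalidates the L1 caches and the
 * branch predictor before enabling them, so no stale entries can be hit once
 * SCTLR.I/C/Z are set.
 */
__STATIC_INLINE void L1C_Example_EnableSequence(void)
{
  L1C_InvalidateDCacheAll();   /* discard unknown data cache contents */
  L1C_InvalidateICacheAll();   /* discard unknown instruction cache contents */
  L1C_InvalidateBTAC();        /* discard unknown branch predictor state */
  L1C_EnableCaches();          /* set SCTLR.I and SCTLR.C */
  L1C_EnableBTAC();            /* set SCTLR.Z */
}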
1110 
1111 /* ##########################  L2 Cache functions  ################################# */
1112 #if (__L2C_PRESENT == 1U) || defined(DOXYGEN)
1113 /** \brief Cache Sync operation by writing CACHE_SYNC register.
1114 */
1115 __STATIC_INLINE void L2C_Sync(void)
1116 {
1117   L2C_310->CACHE_SYNC = 0x0;
1118 }
1119 
1120 /** \brief Read cache controller cache ID from CACHE_ID register.
1121  * \return L2C_310_TypeDef::CACHE_ID
1122  */
1123 __STATIC_INLINE int L2C_GetID (void)
1124 {
1125   return L2C_310->CACHE_ID;
1126 }
1127 
1128 /** \brief Read cache controller cache type from CACHE_TYPE register.
1129 *  \return L2C_310_TypeDef::CACHE_TYPE
1130 */
1131 __STATIC_INLINE int L2C_GetType (void)
1132 {
1133   return L2C_310->CACHE_TYPE;
1134 }
1135 
1136 /** \brief Invalidate all cache by way
1137 */
1138 __STATIC_INLINE void L2C_InvAllByWay (void)
1139 {
1140   unsigned int assoc;
1141 
1142   if (L2C_310->AUX_CNT & (1U << 16U)) {
1143     assoc = 16U;
1144   } else {
1145     assoc =  8U;
1146   }
1147 
1148   L2C_310->INV_WAY = (1U << assoc) - 1U;
1149   while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate
1150 
1151   L2C_Sync();
1152 }
1153 
1154 /** \brief Clean and Invalidate all cache by way
1155 */
1156 __STATIC_INLINE void L2C_CleanInvAllByWay (void)
1157 {
1158   unsigned int assoc;
1159 
1160   if (L2C_310->AUX_CNT & (1U << 16U)) {
1161     assoc = 16U;
1162   } else {
1163     assoc =  8U;
1164   }
1165 
1166   L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U;
1167   while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll invalidate
1168 
1169   L2C_Sync();
1170 }
1171 
1172 /** \brief Enable Level 2 Cache
1173 */
1174 __STATIC_INLINE void L2C_Enable(void)
1175 {
1176   L2C_310->CONTROL = 0;
1177   L2C_310->INTERRUPT_CLEAR = 0x000001FFuL;
1178   L2C_310->DEBUG_CONTROL = 0;
1179   L2C_310->DATA_LOCK_0_WAY = 0;
1180   L2C_310->CACHE_SYNC = 0;
1181   L2C_310->CONTROL = 0x01;
1182   L2C_Sync();
1183 }
1184 
1185 /** \brief Disable Level 2 Cache
1186 */
1187 __STATIC_INLINE void L2C_Disable(void)
1188 {
1189   L2C_310->CONTROL = 0x00;
1190   L2C_Sync();
1191 }
1192 
1193 /** \brief Invalidate cache by physical address
1194 * \param [in] pa Pointer to data to invalidate cache for.
1195 */
1196 __STATIC_INLINE void L2C_InvPa (void *pa)
1197 {
1198   L2C_310->INV_LINE_PA = (unsigned int)pa;
1199   L2C_Sync();
1200 }
1201 
1202 /** \brief Clean cache by physical address
1203 * \param [in] pa Pointer to data to invalidate cache for.
1204 */
1205 __STATIC_INLINE void L2C_CleanPa (void *pa)
1206 {
1207   L2C_310->CLEAN_LINE_PA = (unsigned int)pa;
1208   L2C_Sync();
1209 }
1210 
1211 /** \brief Clean and invalidate cache by physical address
1212 * \param [in] pa Pointer to data to invalidate cache for.
1213 */
1214 __STATIC_INLINE void L2C_CleanInvPa (void *pa)
1215 {
1216   L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa;
1217   L2C_Sync();
1218 }
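
/* Illustrative usage sketch (hypothetical helper): before enabling the L2C-310
 * its RAMs hold unknown data, so invalidate every way first; L2C_Enable() then
 * clears pending interrupts and sets the enable bit.
 */
__STATIC_INLINE void L2C_Example_EnableSequence(void)
{
  L2C_InvAllByWay();
  L2C_Enable();
}

/* For a DMA buffer, clean (or clean+invalidate) line by line by physical
 * address; a 32-byte line size is assumed here:
 *   for (uint32_t pa = buf_pa; pa < buf_pa + size; pa += 32U) { L2C_CleanPa((void *)pa); }
 */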
1219 #endif
1220 
1221 /* ##########################  GIC functions  ###################################### */
1222 #if (__GIC_PRESENT == 1U) || defined(DOXYGEN)
1223 
1224 /** \brief  Enable the interrupt distributor using the GIC's CTLR register.
1225 */
1226 __STATIC_INLINE void GIC_EnableDistributor(void)
1227 {
1228   GICDistributor->CTLR |= 1U;
1229 }
1230 
1231 /** \brief Disable the interrupt distributor using the GIC's CTLR register.
1232 */
1233 __STATIC_INLINE void GIC_DisableDistributor(void)
1234 {
1235   GICDistributor->CTLR &=~1U;
1236 }
1237 
1238 /** \brief Read the GIC's TYPER register.
1239 * \return GICDistributor_Type::TYPER
1240 */
1241 __STATIC_INLINE uint32_t GIC_DistributorInfo(void)
1242 {
1243   return (GICDistributor->TYPER);
1244 }
1245 
1246 /** \brief Reads the GIC's IIDR register.
1247 * \return GICDistributor_Type::IIDR
1248 */
1249 __STATIC_INLINE uint32_t GIC_DistributorImplementer(void)
1250 {
1251   return (GICDistributor->IIDR);
1252 }
1253 
1254 /** \brief Sets the GIC's ITARGETSR register for the given interrupt.
1255 * \param [in] IRQn Interrupt to be configured.
1256 * \param [in] cpu_target CPU interfaces to assign this interrupt to.
1257 */
1258 __STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target)
1259 {
1260   uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
1261   GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U));
1262 }
1263 
1264 /** \brief Read the GIC's ITARGETSR register.
1265 * \param [in] IRQn Interrupt to acquire the configuration for.
1266 * \return GICDistributor_Type::ITARGETSR
1267 */
1268 __STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn)
1269 {
1270   return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1271 }
1272 
1273 /** \brief Enable the CPU's interrupt interface.
1274 */
1275 __STATIC_INLINE void GIC_EnableInterface(void)
1276 {
1277   GICInterface->CTLR |= 1U; //enable interface
1278 }
1279 
1280 /** \brief Disable the CPU's interrupt interface.
1281 */
1282 __STATIC_INLINE void GIC_DisableInterface(void)
1283 {
1284   GICInterface->CTLR &=~1U; //disable interface
1285 }
1286 
1287 /** \brief Read the CPU's IAR register.
1288 * \return GICInterface_Type::IAR
1289 */
1290 __STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
1291 {
1292   return (IRQn_Type)(GICInterface->IAR);
1293 }
1294 
1295 /** \brief Writes the given interrupt number to the CPU's EOIR register.
1296 * \param [in] IRQn The interrupt to be signaled as finished.
1297 */
1298 __STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
1299 {
1300   GICInterface->EOIR = IRQn;
1301 }
1302 
1303 /** \brief Enables the given interrupt using GIC's ISENABLER register.
1304 * \param [in] IRQn The interrupt to be enabled.
1305 */
1306 __STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
1307 {
1308   GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
1309 }
1310 
1311 /** \brief Get interrupt enable status using GIC's ISENABLER register.
1312 * \param [in] IRQn The interrupt to be queried.
1313 * \return 0 - interrupt is not enabled, 1 - interrupt is enabled.
1314 */
1315 __STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn)
1316 {
1317   return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1318 }
1319 
1320 /** \brief Disables the given interrupt using GIC's ICENABLER register.
1321 * \param [in] IRQn The interrupt to be disabled.
1322 */
1323 __STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
1324 {
1325   GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
1326 }
1327 
1328 /** \brief Get interrupt pending status from GIC's ISPENDR register.
1329 * \param [in] IRQn The interrupt to be queried.
1330 * \return 0 - interrupt is not pending, 1 - interrupt is pending.
1331 */
1332 __STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn)
1333 {
1334   uint32_t pend;
1335 
1336   if (IRQn >= 16U) {
1337     pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1338   } else {
1339     // INTID 0-15 Software Generated Interrupt
1340     pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1341     // No CPU identification offered
1342     if (pend != 0U) {
1343       pend = 1U;
1344     } else {
1345       pend = 0U;
1346     }
1347   }
1348 
1349   return (pend);
1350 }
1351 
1352 /** \brief Sets the given interrupt as pending using GIC's ISPENDR register.
1353 * \param [in] IRQn The interrupt to be set pending.
1354 */
1355 __STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
1356 {
1357   if (IRQn >= 16U) {
1358     GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
1359   } else {
1360     // INTID 0-15 Software Generated Interrupt
1361     GICDistributor->SPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
1362   }
1363 }
1364 
1365 /** \brief Clears the given interrupt from being pending using GIC's ICPENDR register.
1366 * \param [in] IRQn The interrupt whose pending state is to be cleared.
1367 */
1368 __STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
1369 {
1370   if (IRQn >= 16U) {
1371     GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
1372   } else {
1373     // INTID 0-15 Software Generated Interrupt
1374     GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
1375   }
1376 }
1377 
1378 /** \brief Sets the interrupt configuration using GIC's ICFGR register.
1379 * \param [in] IRQn The interrupt to be configured.
1380 * \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
1381 *                                           Bit 1: 0 - level sensitive, 1 - edge triggered
1382 */
1383 __STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config)
1384 {
1385   uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U];
1386   uint32_t shift = (IRQn % 16U) << 1U;
1387 
1388   icfgr &= (~(3U         << shift));
1389   icfgr |= (  int_config << shift);
1390 
1391   GICDistributor->ICFGR[IRQn / 16U] = icfgr;
1392 }
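
/* Illustrative usage sketch (hypothetical helper and IRQ number): per the
 * Int_config description above, writing 0 keeps an interrupt level-sensitive
 * while writing 2 (bit 1 set) makes it edge-triggered.
 */
__STATIC_INLINE void GIC_Example_MakeEdgeTriggered(IRQn_Type IRQn)
{
  GIC_SetConfiguration(IRQn, 2U);   /* Int_config bit 1 = 1: edge triggered */
}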
1393 
1394 /** \brief Get the interrupt configuration from the GIC's ICFGR register.
1395 * \param [in] IRQn Interrupt to acquire the configuration for.
1396 * \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
1397 *                                 Bit 1: 0 - level sensitive, 1 - edge triggered
1398 */
1399 __STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn)
1400 {
1401   return ((GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) << 1U)) & 3UL);
1402 }
1403 
1404 /** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register.
1405 * \param [in] IRQn The interrupt to be configured.
1406 * \param [in] priority The priority for the interrupt, lower values denote higher priorities.
1407 */
1408 __STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
1409 {
1410   uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
1411   GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U));
1412 }
1413 
1414 /** \brief Read the current interrupt priority from GIC's IPRIORITYR register.
1415 * \param [in] IRQn The interrupt to be queried.
1416 */
1417 __STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
1418 {
1419   return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1420 }
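
/* Illustrative sketch of the probing technique used by GIC_DistInit() further
 * down: writing 0xFF to a priority field and reading it back reveals how many
 * priority bits the implementation provides (unimplemented low bits read as 0).
 * The helper name is hypothetical.
 */
__STATIC_INLINE uint32_t GIC_Example_GetImplementedPriorityMask(void)
{
  GIC_SetPriority((IRQn_Type)0U, 0xFFU);
  return GIC_GetPriority((IRQn_Type)0U);   /* e.g. 0xF8 when 5 priority bits are implemented */
}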
1421 
1422 /** \brief Set the interrupt priority mask using CPU's PMR register.
1423 * \param [in] priority Priority mask to be set.
1424 */
1425 __STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
1426 {
1427   GICInterface->PMR = priority & 0xFFUL; //set priority mask
1428 }
1429 
1430 /** \brief Read the current interrupt priority mask from CPU's PMR register.
1431 * \result GICInterface_Type::PMR
1432 */
1433 __STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
1434 {
1435   return GICInterface->PMR;
1436 }
1437 
1438 /** \brief Configures the group priority and subpriority split point using CPU's BPR register.
1439 * \param [in] binary_point Amount of bits used as subpriority.
1440 */
1441 __STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
1442 {
1443   GICInterface->BPR = binary_point & 7U; //set binary point
1444 }
1445 
1446 /** \brief Read the current group priority and subpriority split point from CPU's BPR register.
1447 * \return GICInterface_Type::BPR
1448 */
1449 __STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
1450 {
1451   return GICInterface->BPR;
1452 }
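
/* Worked example (assuming the usual GICv1/v2 interpretation of the binary
 * point): a binary point value bp makes priority bits [7:bp+1] the group
 * (preemption) priority and bits [bp:0] the subpriority. The helper below is
 * hypothetical and only illustrates the split.
 */
__STATIC_INLINE uint32_t GIC_Example_GroupPriority(uint32_t priority, uint32_t bp)
{
  return (priority & 0xFFUL) >> (bp + 1U);   /* e.g. bp = 3: group priority = priority[7:4] */
}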
1453 
1454 /** \brief Get the status for a given interrupt.
1455 * \param [in] IRQn The interrupt to get status for.
1456 * \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active
1457 */
1458 __STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
1459 {
1460   uint32_t pending, active;
1461 
1462   active = ((GICDistributor->ISACTIVER[IRQn / 32U])  >> (IRQn % 32U)) & 1UL;
1463   pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
1464 
1465   return ((active<<1U) | pending);
1466 }
1467 
1468 /** \brief Generate a software interrupt using GIC's SGIR register.
1469 * \param [in] IRQn Software interrupt to be generated.
1470 * \param [in] target_list List of CPUs the software interrupt should be forwarded to.
1471 * \param [in] filter_list Filter to be applied to determine interrupt receivers.
1472 */
1473 __STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list)
1474 {
1475   GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL);
1476 }
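
/* Illustrative usage sketch (hypothetical helper; filter semantics as defined
 * for the GICv2 GICD_SGIR register): filter_list 0 forwards the SGI to the
 * CPUs named in target_list, 1 to all CPUs except the requester, 2 only to
 * the requesting CPU.
 */
__STATIC_INLINE void GIC_Example_KickCPU0(void)
{
  GIC_SendSGI((IRQn_Type)0, 1U /* CPU0 in the target list */, 0U /* use target_list */);
}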
1477 
1478 /** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register.
1479 * \return GICInterface_Type::HPPIR
1480 */
1481 __STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void)
1482 {
1483   return GICInterface->HPPIR;
1484 }
1485 
1486 /** \brief Provides information about the implementer and revision of the CPU interface.
1487 * \return GICInterface_Type::IIDR
1488 */
1489 __STATIC_INLINE uint32_t GIC_GetInterfaceId(void)
1490 {
1491   return GICInterface->IIDR;
1492 }
1493 
1494 /** \brief Set the interrupt group in the GIC's IGROUPR register.
1495 * \param [in] IRQn The interrupt to be configured.
1496 * \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1
1497 */
1498 __STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group)
1499 {
1500   uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U];
1501   uint32_t shift   = (IRQn % 32U);
1502 
1503   igroupr &= (~(1U          << shift));
1504   igroupr |= ( (group & 1U) << shift);
1505 
1506   GICDistributor->IGROUPR[IRQn / 32U] = igroupr;
1507 }
1508 #define GIC_SetSecurity         GIC_SetGroup
1509 
1510 /** \brief Get the interrupt group from the GIC's IGROUPR register.
1511 * \param [in] IRQn The interrupt to be queried.
1512 * \return 0 - Group 0, 1 - Group 1
1513 */
1514 __STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn)
1515 {
1516   return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1517 }
1518 #define GIC_GetSecurity         GIC_GetGroup
1519 
1520 /** \brief Initialize the interrupt distributor.
1521 */
1522 __STATIC_INLINE void GIC_DistInit(void)
1523 {
1524   uint32_t i;
1525   uint32_t num_irq = 0U;
1526   uint32_t priority_field;
1527 
1528   //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
1529   //configuring all of the interrupts as Secure.
1530 
1531   //Disable interrupt forwarding
1532   GIC_DisableDistributor();
1533   //Get the maximum number of interrupts that the GIC supports
1534   num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U);
1535 
1536   /* Priority level is implementation defined.
1537    To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
1538    priority field and read back the value stored.*/
1539   GIC_SetPriority((IRQn_Type)0U, 0xFFU);
1540   priority_field = GIC_GetPriority((IRQn_Type)0U);
1541 
1542   for (i = 32U; i < num_irq; i++)
1543   {
1544       //Disable the SPI interrupt
1545       GIC_DisableIRQ((IRQn_Type)i);
1546       //Set level-sensitive (and N-N model)
1547       GIC_SetConfiguration((IRQn_Type)i, 0U);
1548       //Set priority
1549       GIC_SetPriority((IRQn_Type)i, priority_field/2U);
1550       //Set target list to CPU0
1551       GIC_SetTarget((IRQn_Type)i, 1U);
1552   }
1553   //Enable distributor
1554   GIC_EnableDistributor();
1555 }
1556 
1557 /** \brief Initialize the CPU's interrupt interface
1558 */
1559 __STATIC_INLINE void GIC_CPUInterfaceInit(void)
1560 {
1561   uint32_t i;
1562   uint32_t priority_field;
1563 
1564   //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
1565   //configuring all of the interrupts as Secure.
1566 
1567   //Disable interrupt forwarding
1568   GIC_DisableInterface();
1569 
1570   /* Priority level is implementation defined.
1571    To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
1572    priority field and read back the value stored.*/
1573   GIC_SetPriority((IRQn_Type)0U, 0xFFU);
1574   priority_field = GIC_GetPriority((IRQn_Type)0U);
1575 
1576   //SGI and PPI
1577   for (i = 0U; i < 32U; i++)
1578   {
1579     if(i > 15U) {
1580       //Set level-sensitive (and N-N model) for PPI
1581       GIC_SetConfiguration((IRQn_Type)i, 0U);
1582     }
1583     //Disable SGI and PPI interrupts
1584     GIC_DisableIRQ((IRQn_Type)i);
1585     //Set priority
1586     GIC_SetPriority((IRQn_Type)i, priority_field/2U);
1587   }
1588   //Enable interface
1589   GIC_EnableInterface();
1590   //Set binary point to 0
1591   GIC_SetBinaryPoint(0U);
1592   //Set priority mask
1593   GIC_SetInterfacePriorityMask(0xFFU);
1594 }
1595 
1596 /** \brief Initialize and enable the GIC
1597 */
1598 __STATIC_INLINE void GIC_Enable(void)
1599 {
1600   GIC_DistInit();
1601   GIC_CPUInterfaceInit(); //per CPU
1602 }
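
/* Illustrative end-to-end sketch (hypothetical helpers, not part of the CMSIS
 * API): bring the GIC up, route and enable one SPI, then acknowledge and
 * complete it from the IRQ exception handler. Interrupt IDs of 1020 and above
 * are treated as special/spurious per the GIC architecture.
 */
__STATIC_INLINE void GIC_Example_Setup(IRQn_Type IRQn)
{
  GIC_Enable();                      /* distributor + this CPU's interface */
  GIC_SetConfiguration(IRQn, 0U);    /* level sensitive */
  GIC_SetPriority(IRQn, 0x80U);
  GIC_SetTarget(IRQn, 1U);           /* forward to CPU0 */
  GIC_EnableIRQ(IRQn);
}

__STATIC_INLINE void GIC_Example_IRQHandler(void)
{
  IRQn_Type IRQn = GIC_AcknowledgePending();
  if ((uint32_t)IRQn < 1020U) {
    /* ...dispatch to the driver handler registered for IRQn here... */
    GIC_EndInterrupt(IRQn);          /* signal end of interrupt to the CPU interface */
  }
}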
1603 #endif
1604 
1605 /* ##########################  Generic Timer functions  ############################ */
1606 #if (__TIM_PRESENT == 1U) || defined(DOXYGEN)
1607 
1608 /* PL1 Physical Timer */
1609 #if (__CORTEX_A == 7U) || defined(DOXYGEN)
1610 
1611 /** \brief Physical Timer Control register */
1612 typedef union
1613 {
1614   struct
1615   {
1616     uint32_t ENABLE:1;      /*!< \brief bit: 0      Enables the timer. */
1617     uint32_t IMASK:1;       /*!< \brief bit: 1      Timer output signal mask bit. */
1618     uint32_t ISTATUS:1;     /*!< \brief bit: 2      The status of the timer. */
1619     RESERVED(0:29, uint32_t)
1620   } b;                      /*!< \brief Structure used for bit  access */
1621   uint32_t w;               /*!< \brief Type      used for word access */
1622 } CNTP_CTL_Type;
1623 
1624 /** \brief Configures the frequency the timer shall run at.
1625 * \param [in] value The timer frequency in Hz.
1626 */
1627 __STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value)
1628 {
1629   __set_CNTFRQ(value);
1630   __ISB();
1631 }
1632 
1633 /** \brief Sets the reset value of the timer.
1634 * \param [in] value The value the timer is loaded with.
1635 */
1636 __STATIC_INLINE void PL1_SetLoadValue(uint32_t value)
1637 {
1638   __set_CNTP_TVAL(value);
1639   __ISB();
1640 }
1641 
1642 /** \brief Get the current counter value.
1643 * \return Current counter value.
1644 */
1645 __STATIC_INLINE uint32_t PL1_GetCurrentValue(void)
1646 {
1647   return(__get_CNTP_TVAL());
1648 }
1649 
1650 /** \brief Get the current physical counter value.
1651 * \return Current physical counter value.
1652 */
1653 __STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void)
1654 {
1655   return(__get_CNTPCT());
1656 }
1657 
1658 /** \brief Set the physical compare value.
1659 * \param [in] value New physical timer compare value.
1660 */
1661 __STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value)
1662 {
1663   __set_CNTP_CVAL(value);
1664   __ISB();
1665 }
1666 
1667 /** \brief Get the physical compare value.
1668 * \return Physical compare value.
1669 */
1670 __STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void)
1671 {
1672   return(__get_CNTP_CVAL());
1673 }
1674 
1675 /** \brief Configure the timer by setting the control value.
1676 * \param [in] value New timer control value.
1677 */
1678 __STATIC_INLINE void PL1_SetControl(uint32_t value)
1679 {
1680   __set_CNTP_CTL(value);
1681   __ISB();
1682 }
1683 
1684 /** \brief Get the control value.
1685 * \return Control value.
1686 */
1687 __STATIC_INLINE uint32_t PL1_GetControl(void)
1688 {
1689   return(__get_CNTP_CTL());
1690 }
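
/* Illustrative sketch of a periodic PL1 physical timer tick (hypothetical
 * helper, assuming CNTFRQ has already been programmed by boot firmware): load
 * CNTP_TVAL and enable the timer with its output signal unmasked.
 */
__STATIC_INLINE void PL1_Example_StartPeriodicTick(uint32_t ticks)
{
  CNTP_CTL_Type ctl;

  PL1_SetLoadValue(ticks);    /* timer counts down from 'ticks' to 0 */
  ctl.w = 0U;
  ctl.b.ENABLE = 1U;          /* enable the timer */
  ctl.b.IMASK  = 0U;          /* do not mask the timer output signal */
  PL1_SetControl(ctl.w);
}

/* In the timer interrupt handler, writing the load value again re-arms the
 * timer for the next period: PL1_SetLoadValue(ticks); */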
1691 #endif
1692 
1693 /* Private Timer */
1694 #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN)
1695 /** \brief Set the load value in the timer's LOAD register.
1696 * \param [in] value The load value to be set.
1697 */
1698 __STATIC_INLINE void PTIM_SetLoadValue(uint32_t value)
1699 {
1700   PTIM->LOAD = value;
1701 }
1702 
1703 /** \brief Get the load value from the timer's LOAD register.
1704 * \return Timer_Type::LOAD
1705 */
1706 __STATIC_INLINE uint32_t PTIM_GetLoadValue(void)
1707 {
1708   return(PTIM->LOAD);
1709 }
1710 
1711 /** \brief Set the current counter value in the timer's COUNTER register.
1712 */
1713 __STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value)
1714 {
1715   PTIM->COUNTER = value;
1716 }
1717 
1718 /** \brief Get the current counter value from the timer's COUNTER register.
1719 * \result Timer_Type::COUNTER
1720 */
1721 __STATIC_INLINE uint32_t PTIM_GetCurrentValue(void)
1722 {
1723   return(PTIM->COUNTER);
1724 }
1725 
1726 /** \brief Configure the timer using its CONTROL register.
1727 * \param [in] value The new configuration value to be set.
1728 */
1729 __STATIC_INLINE void PTIM_SetControl(uint32_t value)
1730 {
1731   PTIM->CONTROL = value;
1732 }
1733 
1734 /** \brief Get the current timer configuration from its CONTROL register.
1735 * \return Timer_Type::CONTROL
1736 */
1737 __STATIC_INLINE uint32_t PTIM_GetControl(void)
1738 {
1739   return(PTIM->CONTROL);
1740 }
1741 
1742 /** \brief Get the event flag from the timer's ISR register.
1743 * \return 0 - flag is not set, 1- flag is set
1744 */
1745 __STATIC_INLINE uint32_t PTIM_GetEventFlag(void)
1746 {
1747   return (PTIM->ISR & 1UL);
1748 }
1749 
1750 /** \brief Clear the event flag in the timer's ISR register.
1751 */
1752 __STATIC_INLINE void PTIM_ClearEventFlag(void)
1753 {
1754   PTIM->ISR = 1;
1755 }
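
/* Illustrative sketch (assuming the usual Cortex-A5/A9 private timer CONTROL
 * layout: bit 0 enable, bit 1 auto-reload, bit 2 IRQ enable, bits [15:8]
 * prescaler). The helper name and the reload value are hypothetical.
 */
__STATIC_INLINE void PTIM_Example_StartPeriodic(uint32_t reload)
{
  PTIM_SetLoadValue(reload);            /* counts down from 'reload' to 0 */
  PTIM_SetControl((0U << 8) |           /* prescaler 0 */
                  (1U << 2) |           /* IRQ enable */
                  (1U << 1) |           /* auto reload */
                  (1U << 0));           /* timer enable */
}

/* The timer ISR should call PTIM_ClearEventFlag() before returning, otherwise
 * the event flag stays set and the interrupt remains asserted. */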
1756 #endif
1757 #endif
1758 
1759 /* ##########################  MMU functions  ###################################### */
1760 
1761 #define SECTION_DESCRIPTOR      (0x2)
1762 #define SECTION_MASK            (0xFFFFFFFC)
1763 
1764 #define SECTION_TEXCB_MASK      (0xFFFF8FF3)
1765 #define SECTION_B_SHIFT         (2)
1766 #define SECTION_C_SHIFT         (3)
1767 #define SECTION_TEX0_SHIFT      (12)
1768 #define SECTION_TEX1_SHIFT      (13)
1769 #define SECTION_TEX2_SHIFT      (14)
1770 
1771 #define SECTION_XN_MASK         (0xFFFFFFEF)
1772 #define SECTION_XN_SHIFT        (4)
1773 
1774 #define SECTION_DOMAIN_MASK     (0xFFFFFE1F)
1775 #define SECTION_DOMAIN_SHIFT    (5)
1776 
1777 #define SECTION_P_MASK          (0xFFFFFDFF)
1778 #define SECTION_P_SHIFT         (9)
1779 
1780 #define SECTION_AP_MASK         (0xFFFF73FF)
1781 #define SECTION_AP_SHIFT        (10)
1782 #define SECTION_AP2_SHIFT       (15)
1783 
1784 #define SECTION_S_MASK          (0xFFFEFFFF)
1785 #define SECTION_S_SHIFT         (16)
1786 
1787 #define SECTION_NG_MASK         (0xFFFDFFFF)
1788 #define SECTION_NG_SHIFT        (17)
1789 
1790 #define SECTION_SUPER_MASK      (0xFFF7FFFF)
1791 #define SECTION_SUPER_SHIFT     (18)
1792 
1793 #define SECTION_NS_MASK         (0xFFF7FFFF)
1794 #define SECTION_NS_SHIFT        (19)
1795 
1796 #define PAGE_L1_DESCRIPTOR      (0x1)
1797 #define PAGE_L1_MASK            (0xFFFFFFFC)
1798 
1799 #define PAGE_L2_4K_DESC         (0x2)
1800 #define PAGE_L2_4K_MASK         (0xFFFFFFFD)
1801 
1802 #define PAGE_L2_64K_DESC        (0x1)
1803 #define PAGE_L2_64K_MASK        (0xFFFFFFFC)
1804 
1805 #define PAGE_4K_TEXCB_MASK      (0xFFFFFE33)
1806 #define PAGE_4K_B_SHIFT         (2)
1807 #define PAGE_4K_C_SHIFT         (3)
1808 #define PAGE_4K_TEX0_SHIFT      (6)
1809 #define PAGE_4K_TEX1_SHIFT      (7)
1810 #define PAGE_4K_TEX2_SHIFT      (8)
1811 
1812 #define PAGE_64K_TEXCB_MASK     (0xFFFF8FF3)
1813 #define PAGE_64K_B_SHIFT        (2)
1814 #define PAGE_64K_C_SHIFT        (3)
1815 #define PAGE_64K_TEX0_SHIFT     (12)
1816 #define PAGE_64K_TEX1_SHIFT     (13)
1817 #define PAGE_64K_TEX2_SHIFT     (14)
1818 
1819 #define PAGE_TEXCB_MASK         (0xFFFF8FF3)
1820 #define PAGE_B_SHIFT            (2)
1821 #define PAGE_C_SHIFT            (3)
1822 #define PAGE_TEX_SHIFT          (12)
1823 
1824 #define PAGE_XN_4K_MASK         (0xFFFFFFFE)
1825 #define PAGE_XN_4K_SHIFT        (0)
1826 #define PAGE_XN_64K_MASK        (0xFFFF7FFF)
1827 #define PAGE_XN_64K_SHIFT       (15)
1828 
1829 #define PAGE_DOMAIN_MASK        (0xFFFFFE1F)
1830 #define PAGE_DOMAIN_SHIFT       (5)
1831 
1832 #define PAGE_P_MASK             (0xFFFFFDFF)
1833 #define PAGE_P_SHIFT            (9)
1834 
1835 #define PAGE_AP_MASK            (0xFFFFFDCF)
1836 #define PAGE_AP_SHIFT           (4)
1837 #define PAGE_AP2_SHIFT          (9)
1838 
1839 #define PAGE_S_MASK             (0xFFFFFBFF)
1840 #define PAGE_S_SHIFT            (10)
1841 
1842 #define PAGE_NG_MASK            (0xFFFFF7FF)
1843 #define PAGE_NG_SHIFT           (11)
1844 
1845 #define PAGE_NS_MASK            (0xFFFFFFF7)
1846 #define PAGE_NS_SHIFT           (3)
1847 
1848 #define OFFSET_1M               (0x00100000)
1849 #define OFFSET_64K              (0x00010000)
1850 #define OFFSET_4K               (0x00001000)
1851 
1852 #define DESCRIPTOR_FAULT        (0x00000000)
1853 
1854 /* Attributes enumerations */
1855 
1856 /* Region size attributes */
1857 typedef enum
1858 {
1859    SECTION,
1860    PAGE_4k,
1861    PAGE_64k,
1862 } mmu_region_size_Type;
1863 
1864 /* Region type attributes */
1865 typedef enum
1866 {
1867    NORMAL,
1868    DEVICE,
1869    SHARED_DEVICE,
1870    NON_SHARED_DEVICE,
1871    STRONGLY_ORDERED
1872 } mmu_memory_Type;
1873 
1874 /* Region cacheability attributes */
1875 typedef enum
1876 {
1877    NON_CACHEABLE,
1878    WB_WA,
1879    WT,
1880    WB_NO_WA,
1881 } mmu_cacheability_Type;
1882 
1883 /* Region parity check attributes */
1884 typedef enum
1885 {
1886    ECC_DISABLED,
1887    ECC_ENABLED,
1888 } mmu_ecc_check_Type;
1889 
1890 /* Region execution attributes */
1891 typedef enum
1892 {
1893    EXECUTE,
1894    NON_EXECUTE,
1895 } mmu_execute_Type;
1896 
1897 /* Region global attributes */
1898 typedef enum
1899 {
1900    GLOBAL,
1901    NON_GLOBAL,
1902 } mmu_global_Type;
1903 
1904 /* Region shareability attributes */
1905 typedef enum
1906 {
1907    NON_SHARED,
1908    SHARED,
1909 } mmu_shared_Type;
1910 
1911 /* Region security attributes */
1912 typedef enum
1913 {
1914    SECURE,
1915    NON_SECURE,
1916 } mmu_secure_Type;
1917 
1918 /* Region access attributes */
1919 typedef enum
1920 {
1921    NO_ACCESS,
1922    RW,
1923    READ,
1924 } mmu_access_Type;
1925 
1926 /* Memory Region definition */
1927 typedef struct RegionStruct {
1928     mmu_region_size_Type rg_t;
1929     mmu_memory_Type mem_t;
1930     uint8_t domain;
1931     mmu_cacheability_Type inner_norm_t;
1932     mmu_cacheability_Type outer_norm_t;
1933     mmu_ecc_check_Type e_t;
1934     mmu_execute_Type xn_t;
1935     mmu_global_Type g_t;
1936     mmu_secure_Type sec_t;
1937     mmu_access_Type priv_t;
1938     mmu_access_Type user_t;
1939     mmu_shared_Type sh_t;
1940 
1941 } mmu_region_attributes_Type;
1942 
1943 //Following macros define the descriptors and attributes
1944 //Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0
1945 #define section_normal(descriptor_l1, region)     region.rg_t = SECTION; \
1946                                    region.domain = 0x0; \
1947                                    region.e_t = ECC_DISABLED; \
1948                                    region.g_t = GLOBAL; \
1949                                    region.inner_norm_t = WB_WA; \
1950                                    region.outer_norm_t = WB_WA; \
1951                                    region.mem_t = NORMAL; \
1952                                    region.sec_t = SECURE; \
1953                                    region.xn_t = EXECUTE; \
1954                                    region.priv_t = RW; \
1955                                    region.user_t = RW; \
1956                                    region.sh_t = SHARED; \
1957                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
1958 
1959 //Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0
1960 #define section_normal_nc(descriptor_l1, region)    region.rg_t = SECTION; \
1961                                    region.domain = 0x0; \
1962                                    region.e_t = ECC_DISABLED; \
1963                                    region.g_t = GLOBAL; \
1964                                    region.inner_norm_t = NON_CACHEABLE; \
1965                                    region.outer_norm_t = NON_CACHEABLE; \
1966                                    region.mem_t = NORMAL; \
1967                                    region.sec_t = SECURE; \
1968                                    region.xn_t = EXECUTE; \
1969                                    region.priv_t = RW; \
1970                                    region.user_t = RW; \
1971                                    region.sh_t = NON_SHARED; \
1972                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
1973 
1974 //Sect_Normal_RO_NC. Outer & inner non-cacheable, non-shareable, executable, ro, domain 0
1975 #define section_normal_ro_nc(descriptor_l1, region) region.rg_t = SECTION; \
1976                                    region.domain = 0x0; \
1977                                    region.e_t = ECC_DISABLED; \
1978                                    region.g_t = GLOBAL; \
1979                                    region.inner_norm_t = NON_CACHEABLE; \
1980                                    region.outer_norm_t = NON_CACHEABLE; \
1981                                    region.mem_t = NORMAL; \
1982                                    region.sec_t = SECURE; \
1983                                    region.xn_t = EXECUTE; \
1984                                    region.priv_t = READ; \
1985                                    region.user_t = READ; \
1986                                    region.sh_t = NON_SHARED; \
1987                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
1988 
1989 //Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0
1990 #define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \
1991                                    region.domain = 0x0; \
1992                                    region.e_t = ECC_DISABLED; \
1993                                    region.g_t = GLOBAL; \
1994                                    region.inner_norm_t = WB_WA; \
1995                                    region.outer_norm_t = WB_WA; \
1996                                    region.mem_t = NORMAL; \
1997                                    region.sec_t = SECURE; \
1998                                    region.xn_t = EXECUTE; \
1999                                    region.priv_t = READ; \
2000                                    region.user_t = READ; \
2001                                    region.sh_t = SHARED; \
2002                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2003 
2004 //Sect_Normal_RO. Sect_Normal_Cod, but not executable
2005 #define section_normal_ro(descriptor_l1, region)  region.rg_t = SECTION; \
2006                                    region.domain = 0x0; \
2007                                    region.e_t = ECC_DISABLED; \
2008                                    region.g_t = GLOBAL; \
2009                                    region.inner_norm_t = WB_WA; \
2010                                    region.outer_norm_t = WB_WA; \
2011                                    region.mem_t = NORMAL; \
2012                                    region.sec_t = SECURE; \
2013                                    region.xn_t = NON_EXECUTE; \
2014                                    region.priv_t = READ; \
2015                                    region.user_t = READ; \
2016                                    region.sh_t = SHARED; \
2017                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2018 
2019 //Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable
2020 #define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \
2021                                    region.domain = 0x0; \
2022                                    region.e_t = ECC_DISABLED; \
2023                                    region.g_t = GLOBAL; \
2024                                    region.inner_norm_t = WB_WA; \
2025                                    region.outer_norm_t = WB_WA; \
2026                                    region.mem_t = NORMAL; \
2027                                    region.sec_t = SECURE; \
2028                                    region.xn_t = NON_EXECUTE; \
2029                                    region.priv_t = RW; \
2030                                    region.user_t = RW; \
2031                                    region.sh_t = SHARED; \
2032                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2033 //Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
2034 #define section_so(descriptor_l1, region) region.rg_t = SECTION; \
2035                                    region.domain = 0x0; \
2036                                    region.e_t = ECC_DISABLED; \
2037                                    region.g_t = GLOBAL; \
2038                                    region.inner_norm_t = NON_CACHEABLE; \
2039                                    region.outer_norm_t = NON_CACHEABLE; \
2040                                    region.mem_t = STRONGLY_ORDERED; \
2041                                    region.sec_t = SECURE; \
2042                                    region.xn_t = NON_EXECUTE; \
2043                                    region.priv_t = RW; \
2044                                    region.user_t = RW; \
2045                                    region.sh_t = NON_SHARED; \
2046                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2047 
2048 #if 0
2049 //Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0
2050 #define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \
2051                                    region.domain = 0x0; \
2052                                    region.e_t = ECC_DISABLED; \
2053                                    region.g_t = GLOBAL; \
2054                                    region.inner_norm_t = NON_CACHEABLE; \
2055                                    region.outer_norm_t = NON_CACHEABLE; \
2056                                    region.mem_t = STRONGLY_ORDERED; \
2057                                    region.sec_t = SECURE; \
2058                                    region.xn_t = NON_EXECUTE; \
2059                                    region.priv_t = READ; \
2060                                    region.user_t = READ; \
2061                                    region.sh_t = NON_SHARED; \
2062                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2063 
2064 //Sect_Device_RW. Sect_Device_RO, but writeable
2065 #define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \
2066                                    region.domain = 0x0; \
2067                                    region.e_t = ECC_DISABLED; \
2068                                    region.g_t = GLOBAL; \
2069                                    region.inner_norm_t = NON_CACHEABLE; \
2070                                    region.outer_norm_t = NON_CACHEABLE; \
2071                                    region.mem_t = STRONGLY_ORDERED; \
2072                                    region.sec_t = SECURE; \
2073                                    region.xn_t = NON_EXECUTE; \
2074                                    region.priv_t = RW; \
2075                                    region.user_t = RW; \
2076                                    region.sh_t = NON_SHARED; \
2077                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2078 #else
2079 //Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0
2080 #define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \
2081                                    region.domain = 0x0; \
2082                                    region.e_t = ECC_DISABLED; \
2083                                    region.g_t = GLOBAL; \
2084                                    region.inner_norm_t = NON_CACHEABLE; \
2085                                    region.outer_norm_t = NON_CACHEABLE; \
2086                                    region.mem_t = SHARED_DEVICE; \
2087                                    region.sec_t = SECURE; \
2088                                    region.xn_t = NON_EXECUTE; \
2089                                    region.priv_t = READ; \
2090                                    region.user_t = READ; \
2091                                    region.sh_t = NON_SHARED; \
2092                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2093 
2094 //Sect_Device_RW. Sect_Device_RO, but writeable
2095 #define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \
2096                                    region.domain = 0x0; \
2097                                    region.e_t = ECC_DISABLED; \
2098                                    region.g_t = GLOBAL; \
2099                                    region.inner_norm_t = NON_CACHEABLE; \
2100                                    region.outer_norm_t = NON_CACHEABLE; \
2101                                    region.mem_t = SHARED_DEVICE; \
2102                                    region.sec_t = SECURE; \
2103                                    region.xn_t = NON_EXECUTE; \
2104                                    region.priv_t = RW; \
2105                                    region.user_t = RW; \
2106                                    region.sh_t = NON_SHARED; \
2107                                    MMU_GetSectionDescriptor(&descriptor_l1, region);
2108 #endif
2109 
2110 //Page_4k_Device_RW.  Shared device, not executable, rw, domain 0
2111 #define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \
2112                                    region.domain = 0x0; \
2113                                    region.e_t = ECC_DISABLED; \
2114                                    region.g_t = GLOBAL; \
2115                                    region.inner_norm_t = NON_CACHEABLE; \
2116                                    region.outer_norm_t = NON_CACHEABLE; \
2117                                    region.mem_t = SHARED_DEVICE; \
2118                                    region.sec_t = SECURE; \
2119                                    region.xn_t = NON_EXECUTE; \
2120                                    region.priv_t = RW; \
2121                                    region.user_t = RW; \
2122                                    region.sh_t = NON_SHARED; \
2123                                    MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
2124 
2125 //Page_4k_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0
2126 #define page4k_normal(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \
2127                                    region.domain = 0x0; \
2128                                    region.e_t = ECC_DISABLED; \
2129                                    region.g_t = GLOBAL; \
2130                                    region.inner_norm_t = WB_WA; \
2131                                    region.outer_norm_t = WB_WA; \
2132                                    region.mem_t = NORMAL; \
2133                                    region.sec_t = SECURE; \
2134                                    region.xn_t = EXECUTE; \
2135                                    region.priv_t = RW; \
2136                                    region.user_t = RW; \
2137                                    region.sh_t = SHARED; \
2138                                    MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
2139 
2140 //Page_64k_Device_RW.  Shared device, not executable, rw, domain 0
2141 #define page64k_device_rw(descriptor_l1, descriptor_l2, region)  region.rg_t = PAGE_64k; \
2142                                    region.domain = 0x0; \
2143                                    region.e_t = ECC_DISABLED; \
2144                                    region.g_t = GLOBAL; \
2145                                    region.inner_norm_t = NON_CACHEABLE; \
2146                                    region.outer_norm_t = NON_CACHEABLE; \
2147                                    region.mem_t = SHARED_DEVICE; \
2148                                    region.sec_t = SECURE; \
2149                                    region.xn_t = NON_EXECUTE; \
2150                                    region.priv_t = RW; \
2151                                    region.user_t = RW; \
2152                                    region.sh_t = NON_SHARED; \
2153                                    MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
2154 
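/* Illustrative usage sketch (not part of the original CMSIS API). The attribute
 * macros above fill an mmu_region_attributes_Type and produce the matching L1
 * descriptor bits; user code then installs that descriptor in the first-level
 * translation table, where one section entry maps 1 MB and is indexed by
 * virtual address bits [31:20]. The 'ttb' pointer and addresses are hypothetical:
 *
 * \code
 *   uint32_t descriptor;
 *   mmu_region_attributes_Type region;
 *
 *   section_normal(descriptor, region);                  // Normal WB/WA, RW, executable
 *   ttb[va >> 20] = (pa & 0xFFF00000U) | descriptor;     // section base address + attributes
 * \endcode
 */
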
2155 /** \brief  Set section execution-never attribute
2156 
2157   \param [out]    descriptor_l1  L1 descriptor.
2158   \param [in]                xn  Section execution-never attribute : EXECUTE , NON_EXECUTE.
2159 
2160   \return          0
2161 */
2162 __STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn)
2163 {
2164   *descriptor_l1 &= SECTION_XN_MASK;
2165   *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT);
2166   return 0;
2167 }
2168 
2169 /** \brief  Set section domain
2170 
2171   \param [out]    descriptor_l1  L1 descriptor.
2172   \param [in]            domain  Section domain
2173 
2174   \return          0
2175 */
2176 __STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain)
2177 {
2178   *descriptor_l1 &= SECTION_DOMAIN_MASK;
2179   *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT);
2180   return 0;
2181 }
2182 
2183 /** \brief  Set section parity check
2184 
2185   \param [out]    descriptor_l1  L1 descriptor.
2186   \param [in]              p_bit Parity check: ECC_DISABLED, ECC_ENABLED
2187 
2188   \return          0
2189 */
2190 __STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
2191 {
2192   *descriptor_l1 &= SECTION_P_MASK;
2193   *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
2194   return 0;
2195 }
2196 
2197 /** \brief  Set section access privileges
2198 
2199   \param [out]    descriptor_l1  L1 descriptor.
2200   \param [in]              user  User Level Access: NO_ACCESS, RW, READ
2201   \param [in]              priv  Privilege Level Access: NO_ACCESS, RW, READ
2202   \param [in]               afe  Access flag enable
2203 
2204   \return          0
2205 */
2206 __STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
2207 {
2208   uint32_t ap = 0;
2209 
2210   if (afe == 0) { //full access
2211     if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
2212     else if ((priv == RW) && (user == NO_ACCESS))   { ap = 0x1; }
2213     else if ((priv == RW) && (user == READ))        { ap = 0x2; }
2214     else if ((priv == RW) && (user == RW))          { ap = 0x3; }
2215     else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2216     else if ((priv == READ) && (user == READ))      { ap = 0x7; }
2217   }
2218 
2219   else { //Simplified access
2220     if ((priv == RW) && (user == NO_ACCESS))        { ap = 0x1; }
2221     else if ((priv == RW) && (user == RW))          { ap = 0x3; }
2222     else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2223     else if ((priv == READ) && (user == READ))      { ap = 0x7; }
2224   }
2225 
2226   *descriptor_l1 &= SECTION_AP_MASK;
2227   *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT;
2228   *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT;
2229 
2230   return 0;
2231 }
2232 
2233 /** \brief  Set section shareability
2234 
2235   \param [out]    descriptor_l1  L1 descriptor.
2236   \param [in]             s_bit  Section shareability: NON_SHARED, SHARED
2237 
2238   \return          0
2239 */
2240 __STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit)
2241 {
2242   *descriptor_l1 &= SECTION_S_MASK;
2243   *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT);
2244   return 0;
2245 }
2246 
2247 /** \brief  Set section Global attribute
2248 
2249   \param [out]    descriptor_l1  L1 descriptor.
2250   \param [in]             g_bit  Section attribute: GLOBAL, NON_GLOBAL
2251 
2252   \return          0
2253 */
2254 __STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit)
2255 {
2256   *descriptor_l1 &= SECTION_NG_MASK;
2257   *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT);
2258   return 0;
2259 }
2260 
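/** \brief  Set section Supersection attribute (descriptive comment added; bit 18 of the L1 section descriptor)

  \param [out]    descriptor_l1  L1 descriptor.
  \param [in]             s_bit  0 - regular section, 1 - supersection

  \return          0
*/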
2261 __STATIC_INLINE int MMU_SuperSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
2262 {
2263   *descriptor_l1 &= SECTION_SUPER_MASK;
2264   *descriptor_l1 |= ((s_bit & 0x1) << SECTION_SUPER_SHIFT);
2265   return 0;
2266 }
2267 
2268 /** \brief  Set section Security attribute
2269 
2270   \param [out]    descriptor_l1  L1 descriptor.
2271   \param [in]             s_bit  Section Security attribute: SECURE, NON_SECURE
2272 
2273   \return          0
2274 */
2275 __STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
2276 {
2277   *descriptor_l1 &= SECTION_NS_MASK;
2278   *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT);
2279   return 0;
2280 }
2281 
2282 /* Page 4k or 64k */
2283 /** \brief  Set 4k/64k page execution-never attribute
2284 
2285   \param [out]    descriptor_l2  L2 descriptor.
2286   \param [in]                xn  Page execution-never attribute : EXECUTE , NON_EXECUTE.
2287   \param [in]              page  Page size: PAGE_4k, PAGE_64k,
2288 
2289   \return          0
2290 */
2291 __STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page)
2292 {
2293   if (page == PAGE_4k)
2294   {
2295       *descriptor_l2 &= PAGE_XN_4K_MASK;
2296       *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT);
2297   }
2298   else
2299   {
2300       *descriptor_l2 &= PAGE_XN_64K_MASK;
2301       *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT);
2302   }
2303   return 0;
2304 }

/** \brief  Set 4k/64k page domain

  \param [out]    descriptor_l1  L1 descriptor.
  \param [in]            domain  Page domain

  \return          0
*/
__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain)
{
  *descriptor_l1 &= PAGE_DOMAIN_MASK;
  *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT);
  return 0;
}

/** \brief  Set 4k/64k page parity check

  \param [out]    descriptor_l1  L1 descriptor.
  \param [in]             p_bit  Parity check: ECC_DISABLED, ECC_ENABLED

  \return          0
*/
__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
{
  *descriptor_l1 &= SECTION_P_MASK;
  *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
  return 0;
}

/** \brief  Set 4k/64k page access privileges

  \param [out]    descriptor_l2  L2 descriptor.
  \param [in]              user  User Level Access: NO_ACCESS, RW, READ
  \param [in]              priv  Privilege Level Access: NO_ACCESS, RW, READ
  \param [in]               afe  Access flag enable

  \return          0
*/
__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
{
  uint32_t ap = 0;

  if (afe == 0) { //full access
    if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
    else if ((priv == RW) && (user == NO_ACCESS))   { ap = 0x1; }
    else if ((priv == RW) && (user == READ))        { ap = 0x2; }
    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
    else if ((priv == READ) && (user == READ))      { ap = 0x6; }
  }

  else { //Simplified access
    if ((priv == RW) && (user == NO_ACCESS))        { ap = 0x1; }
    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
    else if ((priv == READ) && (user == READ))      { ap = 0x7; }
  }

  *descriptor_l2 &= PAGE_AP_MASK;
  *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT;
  *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT;

  return 0;
}

/** \brief  Set 4k/64k page shareability

  \param [out]    descriptor_l2  L2 descriptor.
  \param [in]             s_bit  4k/64k page shareability: NON_SHARED, SHARED

  \return          0
*/
__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit)
{
  *descriptor_l2 &= PAGE_S_MASK;
  *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT);
  return 0;
}

/** \brief  Set 4k/64k page Global attribute

  \param [out]    descriptor_l2  L2 descriptor.
  \param [in]             g_bit  4k/64k page attribute: GLOBAL, NON_GLOBAL

  \return          0
*/
__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit)
{
  *descriptor_l2 &= PAGE_NG_MASK;
  *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT);
  return 0;
}

/** \brief  Set 4k/64k page Security attribute

  \param [out]    descriptor_l1  L1 descriptor.
  \param [in]             s_bit  4k/64k page Security attribute: SECURE, NON_SECURE

  \return          0
*/
__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
{
  *descriptor_l1 &= PAGE_NS_MASK;
  *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
  return 0;
}

/** \brief  Set Section memory attributes

  \param [out]    descriptor_l1  L1 descriptor.
  \param [in]               mem  Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
  \param [in]             outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in]             inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA

  \return          0
*/
__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
{
  *descriptor_l1 &= SECTION_TEXCB_MASK;

  if (STRONGLY_ORDERED == mem)
  {
    return 0;
  }
  else if (SHARED_DEVICE == mem)
  {
    *descriptor_l1 |= (1 << SECTION_B_SHIFT);
  }
  else if (NON_SHARED_DEVICE == mem)
  {
    *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
  }
  else if (NORMAL == mem)
  {
   *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
   switch(inner)
   {
      case NON_CACHEABLE:
        break;
      case WB_WA:
        *descriptor_l1 |= (1 << SECTION_B_SHIFT);
        break;
      case WT:
        *descriptor_l1 |= 1 << SECTION_C_SHIFT;
        break;
      case WB_NO_WA:
        *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
        break;
    }
    switch(outer)
    {
      case NON_CACHEABLE:
        break;
      case WB_WA:
        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
        break;
      case WT:
        *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
        break;
      case WB_NO_WA:
        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT);
        break;
    }
  }
  return 0;
}
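/* Summary of the TEX/C/B encoding used above (ARMv7-A short-descriptor format, TEX remap
   disabled): Strongly-ordered leaves TEX=000, C=0, B=0; Shareable Device sets B (TEX=000,
   C=0, B=1); Non-shareable Device sets TEX[1] (TEX=010, C=0, B=0). For Normal memory,
   TEX[2] is set and the remaining bits select cacheability: C/B encode the inner policy
   and TEX[1:0] the outer policy, with 00 = non-cacheable, 01 = write-back write-allocate,
   10 = write-through, 11 = write-back no write-allocate. */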

/** \brief  Set 4k/64k page memory attributes

  \param [out]    descriptor_l2  L2 descriptor.
  \param [in]               mem  4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
  \param [in]             outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in]             inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
  \param [in]              page  Page size

  \return          0
*/
__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
{
  *descriptor_l2 &= PAGE_4K_TEXCB_MASK;

  if (page == PAGE_64k)
  {
    //same as section
    MMU_MemorySection(descriptor_l2, mem, outer, inner);
  }
  else
  {
    if (STRONGLY_ORDERED == mem)
    {
      return 0;
    }
    else if (SHARED_DEVICE == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
    }
    else if (NON_SHARED_DEVICE == mem)
    {
      *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
    }
    else if (NORMAL == mem)
    {
      *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
      switch(inner)
      {
        case NON_CACHEABLE:
          break;
        case WB_WA:
          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
          break;
        case WT:
          *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
          break;
        case WB_NO_WA:
          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
          break;
      }
      switch(outer)
      {
        case NON_CACHEABLE:
          break;
        case WB_WA:
          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
          break;
        case WT:
          *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
          break;
        case WB_NO_WA:
          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT);
          break;
      }
    }
  }

  return 0;
}

/** \brief  Create an L1 section descriptor

  \param [out]     descriptor  L1 descriptor
  \param [in]             reg  Section attributes

  \return          0
*/
__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
{
  *descriptor  = 0;

  MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
  MMU_XNSection(descriptor, reg.xn_t);
  MMU_DomainSection(descriptor, reg.domain);
  MMU_PSection(descriptor, reg.e_t);
  MMU_APSection(descriptor, reg.priv_t, reg.user_t, 1);
  MMU_SharedSection(descriptor, reg.sh_t);
  MMU_GlobalSection(descriptor, reg.g_t);
  MMU_SecureSection(descriptor, reg.sec_t);
  *descriptor &= SECTION_MASK;
  *descriptor |= SECTION_DESCRIPTOR;

  return 0;
}
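/* Usage sketch (illustrative only; Sect_Normal is a placeholder name): fill an
   mmu_region_attributes_Type for global, secure, executable Normal memory with inner/outer
   write-back write-allocate caching, full access, domain 0, and build the L1 section
   descriptor from it.

     uint32_t Sect_Normal;
     mmu_region_attributes_Type region;

     region.rg_t         = SECTION;
     region.domain       = 0x0;
     region.e_t          = ECC_DISABLED;
     region.g_t          = GLOBAL;
     region.inner_norm_t = WB_WA;
     region.outer_norm_t = WB_WA;
     region.mem_t        = NORMAL;
     region.sec_t        = SECURE;
     region.xn_t         = EXECUTE;
     region.priv_t       = RW;
     region.user_t       = RW;
     region.sh_t         = NON_SHARED;

     MMU_GetSectionDescriptor(&Sect_Normal, region);
*/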

/** \brief  Create an L1 and L2 4k/64k page descriptor

  \param [out]       descriptor  L1 descriptor
  \param [out]      descriptor2  L2 descriptor
  \param [in]               reg  4k/64k page attributes

  \return          0
*/
__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg)
{
  *descriptor  = 0;
  *descriptor2 = 0;

  switch (reg.rg_t)
  {
    case PAGE_4k:
      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k);
      MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k);
      MMU_DomainPage(descriptor, reg.domain);
      MMU_PPage(descriptor, reg.e_t);
      MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
      MMU_SharedPage(descriptor2, reg.sh_t);
      MMU_GlobalPage(descriptor2, reg.g_t);
      MMU_SecurePage(descriptor, reg.sec_t);
      *descriptor &= PAGE_L1_MASK;
      *descriptor |= PAGE_L1_DESCRIPTOR;
      *descriptor2 &= PAGE_L2_4K_MASK;
      *descriptor2 |= PAGE_L2_4K_DESC;
      break;

    case PAGE_64k:
      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k);
      MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k);
      MMU_DomainPage(descriptor, reg.domain);
      MMU_PPage(descriptor, reg.e_t);
      MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
      MMU_SharedPage(descriptor2, reg.sh_t);
      MMU_GlobalPage(descriptor2, reg.g_t);
      MMU_SecurePage(descriptor, reg.sec_t);
      *descriptor &= PAGE_L1_MASK;
      *descriptor |= PAGE_L1_DESCRIPTOR;
      *descriptor2 &= PAGE_L2_64K_MASK;
      *descriptor2 |= PAGE_L2_64K_DESC;
      break;

    case SECTION:
      //error
      break;
  }

  return 0;
}

/** \brief  Create a 1MB Section

  \param [in]               ttb  Translation table base address
  \param [in]      base_address  Section base address
  \param [in]             count  Number of sections to create
  \param [in]     descriptor_l1  L1 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
{
  uint32_t offset;
  uint32_t entry;
  uint32_t i;

  offset = base_address >> 20;
  entry  = (base_address & 0xFFF00000) | descriptor_l1;

  //4 bytes aligned
  ttb = ttb + offset;

  for (i = 0; i < count; i++ )
  {
    //4 bytes aligned
    *(volatile uint32_t *)ttb++ = entry;
    entry += OFFSET_1M;
  }
}
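/* Usage sketch (illustrative only; TTB_BASE, the address and Sect_Normal are placeholders):
   map 64 MB of RAM starting at 0x80000000 as 1 MB sections, using the descriptor built with
   MMU_GetSectionDescriptor() above. The L1 table must be 16 KB aligned and hold 4096 entries.

     uint32_t *ttb = (uint32_t *)TTB_BASE;              // 16 KB aligned L1 table

     MMU_TTSection(ttb, 0x80000000U, 64U, Sect_Normal); // 64 x 1 MB sections
*/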

/** \brief  Create a Supersection (16MB) mapping

  \param [in]               ttb  Translation table base address
  \param [in]      base_address  Supersection base address
  \param [in]             count  Number of 1MB L1 entries to write
  \param [in]     descriptor_l1  L1 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTSuperSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
{
  uint32_t offset;
  uint32_t entry;
  uint32_t i;

  offset = base_address >> 20;
  entry  = (base_address & 0xFF000000) | descriptor_l1 | (1 << SECTION_SUPER_SHIFT);

  //4 bytes aligned
  ttb = ttb + offset;

  for (i = 0; i < count; i++ )
  {
    //4 bytes aligned
    *(volatile uint32_t *)ttb++ = entry;
    entry += OFFSET_1M;
  }
}

/** \brief  Create a 4k page entry

  \param [in]               ttb  L1 table base address
  \param [in]      base_address  4k base address
  \param [in]             count  Number of 4k pages to create
  \param [in]     descriptor_l1  L1 descriptor (region attributes)
  \param [in]            ttb_l2  L2 table base address
  \param [in]     descriptor_l2  L2 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
{

  uint32_t offset, offset2;
  uint32_t entry, entry2;
  uint32_t i;

  offset = base_address >> 20;
  entry  = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;

  //4 bytes aligned
  ttb += offset;
  //create l1_entry
  *(volatile uint32_t *)ttb = entry;

  offset2 = (base_address & 0xff000) >> 12;
  ttb_l2 += offset2;
  entry2 = (base_address & 0xFFFFF000) | descriptor_l2;
  for (i = 0; i < count; i++ )
  {
    //4 bytes aligned
    *(volatile uint32_t *)ttb_l2++ = entry2;
    entry2 += OFFSET_4K;
  }
}
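/* Usage sketch (illustrative only; TTB_L2_BASE, the address, region, Page_L1 and Page_4k are
   placeholders): back one 1 MB region at 0x80100000 with 4 KB pages. The L2 table must be
   1 KB aligned and hold 256 entries; the descriptors come from MMU_GetPageDescriptor() with
   region.rg_t = PAGE_4k.

     uint32_t Page_L1, Page_4k;
     uint32_t *ttb_l2 = (uint32_t *)TTB_L2_BASE;        // 1 KB aligned L2 table

     MMU_GetPageDescriptor(&Page_L1, &Page_4k, region);
     MMU_TTPage4k(ttb, 0x80100000U, 256U, Page_L1, ttb_l2, Page_4k);  // 256 x 4 KB = 1 MB
*/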

/** \brief  Create a 64k page entry

  \param [in]               ttb  L1 table base address
  \param [in]      base_address  64k base address
  \param [in]             count  Number of 64k pages to create
  \param [in]     descriptor_l1  L1 descriptor (region attributes)
  \param [in]            ttb_l2  L2 table base address
  \param [in]     descriptor_l2  L2 descriptor (region attributes)

*/
__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
{
  uint32_t offset, offset2;
  uint32_t entry, entry2;
  uint32_t i,j;


  offset = base_address >> 20;
  entry  = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;

  //4 bytes aligned
  ttb += offset;
  //create l1_entry
  *(volatile uint32_t *)ttb = entry;

  offset2 = (base_address & 0xff000) >> 12;
  ttb_l2 += offset2;
  entry2 = (base_address & 0xFFFF0000) | descriptor_l2;
  for (i = 0; i < count; i++ )
  {
    //create 16 entries
    for (j = 0; j < 16; j++)
    {
      //4 bytes aligned
      *(volatile uint32_t *)ttb_l2++ = entry2;
    }
    entry2 += OFFSET_64K;
  }
}

/** \brief  Enable MMU
*/
__STATIC_INLINE void MMU_Enable(void)
{
  // Set M bit 0 to enable the MMU
  // Set AFE bit to enable simplified access permissions model
  // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
#if 1
  __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
#else
    uint32_t reg;

    reg  = __get_SCTLR();
    /*
    SCTLR.M, bit[0]   MMU enable.
    0  PL1&0 stage 1 MMU disabled.
    1  PL1&0 stage 1 MMU enabled.
    */
    reg |= 0x1;
    __set_SCTLR(reg);
#endif
  __ISB();
}
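/* Usage sketch (illustrative only; ttb is the 16 KB aligned L1 table populated above):
   a minimal bring-up sequence once the translation table is filled in. __set_TTBR0() and
   __set_DACR() are assumed to be the CMSIS CP15 register accessors available alongside
   __get_SCTLR()/__set_SCTLR().

     __set_TTBR0((uint32_t)ttb);   // translation table base
     __set_DACR(1U);               // domain 0 = client, so access permissions are checked
     __ISB();

     MMU_InvalidateTLB();
     MMU_Enable();
*/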

/** \brief  Disable MMU
*/
__STATIC_INLINE void MMU_Disable(void)
{
  // Clear M bit 0 to disable the MMU
  __set_SCTLR( __get_SCTLR() & ~1);
  __ISB();
}

/** \brief  Invalidate entire unified TLB
*/

__STATIC_INLINE void MMU_InvalidateTLB(void)
{
  __set_TLBIALL(0);
  __DSB();     //ensure completion of the invalidation
  __ISB();     //ensure instruction fetch path sees new state
}


#endif // !__ASSEMBLER__

#ifdef __cplusplus
}
#endif

#endif /* __CORE_CA_H_DEPENDANT */

#endif /* __CMSIS_GENERIC */