/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.2.0
 * @date     17. May 2019
 ******************************************************************************/
/*
 * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_CA_H
#define __CMSIS_GCC_CA_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
  #define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines */

#ifndef   __ASM
  #define __ASM                                  __asm
#endif
#ifndef   __INLINE
  #define __INLINE                               inline
#endif
#ifndef   __FORCEINLINE
  #define __FORCEINLINE                          __attribute__((always_inline))
#endif
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE                        static inline
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE                   __attribute__((always_inline)) static inline
#endif
#ifndef   __NO_RETURN
  #define __NO_RETURN                            __attribute__((__noreturn__))
#endif
#ifndef   CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED                       __attribute__((deprecated))
#endif
#ifndef   __USED
  #define __USED                                 __attribute__((used))
#endif
#ifndef   __WEAK
  #define __WEAK                                 __attribute__((weak))
#endif
#ifndef   __PACKED
  #define __PACKED                               __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_STRUCT
  #define __PACKED_STRUCT                        struct __attribute__((packed, aligned(1)))
#endif
#ifndef   __UNALIGNED_UINT16_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT16_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef   __UNALIGNED_UINT32_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT32_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef   __ALIGNED
  #define __ALIGNED(x)                           __attribute__((aligned(x)))
#endif
#ifndef   __COMPILER_BARRIER
  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
#endif
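
/*
 * Illustrative usage sketch: the __UNALIGNED_UINT*_READ/WRITE macros above go
 * through packed structs so the compiler emits code that tolerates unaligned
 * addresses. The buffer and offsets below are hypothetical.
 *
 *   uint8_t frame[8];
 *   __UNALIGNED_UINT32_WRITE(&frame[1], 0x12345678UL);   // store at an odd address
 *   uint32_t v = __UNALIGNED_UINT32_READ(&frame[1]);     // read it back
 *   uint16_t h = __UNALIGNED_UINT16_READ(&frame[3]);     // 16-bit variant
 */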


/* Dual 16-bit saturating subtract */
__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


/* Dual 16-bit saturating add */
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* 32-bit saturating add */
__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Dual 16-bit signed multiply with 64-bit accumulate */
__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* 32-bit saturating subtract */
__STATIC_FORCEINLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Dual 16-bit signed multiply, add the two products */
__STATIC_FORCEINLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Pack the low halfword of ARG1 with the shifted low halfword of ARG2 */
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

/* Dual 16-bit signed multiply with 32-bit accumulate */
__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* Dual 16-bit signed multiply with exchanged halfwords, add the products */
__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Dual 16-bit signed multiply with exchanged halfwords and 32-bit accumulate */
__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* Dual 16-bit signed multiply with exchanged halfwords and 64-bit accumulate */
__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* Signed most-significant-word multiply with accumulate */
__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
 int32_t result;

 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
 return(result);
}


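/*
 * Illustrative usage sketch: a dual 16-bit multiply-accumulate step of the
 * kind used in fixed-point dot products. The packed operand values below are
 * hypothetical Q15 sample pairs.
 *
 *   uint32_t a   = __PKHBT(0x1234, 0x5678, 16);   // pack two halfwords
 *   uint32_t b   = __PKHBT(0x0100, 0x0200, 16);
 *   uint32_t acc = 0U;
 *   acc = __SMLAD(a, b, acc);                     // acc += a.lo*b.lo + a.hi*b.hi
 *   int32_t sat  = __QADD((int32_t)acc, 0x7FFF0000);  // saturating 32-bit add
 */
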
/* ##########################  Core Instruction Access  ######################### */
/**
  \brief   No Operation
 */
#define __NOP()                             __ASM volatile ("nop")

/**
  \brief   Wait For Interrupt
 */
#define __WFI()                             __ASM volatile ("wfi")

/**
  \brief   Wait For Event
 */
#define __WFE()                             __ASM volatile ("wfe")

/**
  \brief   Send Event
 */
#define __SEV()                             __ASM volatile ("sev")

/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE  void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE  void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}

/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE  void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}

/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE  uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;
  __ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value));
  return result;
}
#endif

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE  int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (int16_t)__builtin_bswap16(value);
#else
  int16_t result;

  __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}

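/*
 * Illustrative usage sketch: converting a big-endian 32-bit field from a
 * protocol header on a little-endian core. The variable names and values
 * are hypothetical.
 *
 *   uint32_t be_word = 0x12345678UL;
 *   uint32_t host    = __REV(be_word);             // 0x78563412
 *   uint32_t swapped = __REV16(be_word);           // 0x34127856, per-halfword swap
 *   int16_t  s       = __REVSH((int16_t)0x0080);   // 0x8000 as signed 16-bit
 */
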
/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return             Rotated value
 */
__STATIC_FORCEINLINE  uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U) {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}


/**
  \brief   Breakpoint
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)

/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE  uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}

/**
  \brief   Count leading zeros
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
     __builtin_clz(0) is undefined behaviour, so handle this case specially.
     This guarantees ARM-compatible results if happening to compile on a non-ARM
     target, and ensures the compiler doesn't decide to activate any
     optimisations using the logic "value was passed to __builtin_clz, so it
     is non-zero".
     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
     single CLZ instruction.
   */
  if (value == 0U)
  {
    return 32U;
  }
  return __builtin_clz(value);
}

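/*
 * Illustrative usage sketch: finding the index of the most significant set
 * bit with __CLZ, bit-reversing a word with __RBIT, and rotating with __ROR.
 * The values below are hypothetical.
 *
 *   uint32_t mask = 0x00080000UL;
 *   uint8_t  lz   = __CLZ(mask);             // 12 leading zeros
 *   uint32_t msb  = 31U - lz;                // bit position 19
 *   uint32_t rev  = __RBIT(0x00000001UL);    // 0x80000000
 *   uint32_t rot  = __ROR(0x000000F0UL, 4);  // 0x0000000F
 */
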
/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE  uint8_t __LDREXB(volatile uint8_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by the assembler, so the following less efficient pattern is used.
    */
   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE  uint16_t __LDREXH(volatile uint16_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by the assembler, so the following less efficient pattern is used.
    */
   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE  uint32_t __LDREXW(volatile uint32_t *addr)
{
    uint32_t result;

   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE  uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE  uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE  uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
   uint32_t result;

   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE  void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}

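/*
 * Illustrative usage sketch: an atomic increment built from the exclusive
 * access intrinsics above. The counter variable and helper are hypothetical.
 *
 *   static volatile uint32_t counter;
 *
 *   static inline void atomic_increment(void)
 *   {
 *     uint32_t val;
 *     do {
 *       val = __LDREXW(&counter);                      // load-exclusive, mark the monitor
 *     } while (__STREXW(val + 1U, &counter) != 0U);    // retry if exclusivity was lost
 *     __DMB();                                         // order against subsequent accesses
 *   }
 */
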
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1,ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1,ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

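/*
 * Illustrative usage sketch: clamping an intermediate result into a narrower
 * range. The operand value is hypothetical; the saturation width must be a
 * compile-time constant.
 *
 *   int32_t  acc = 70000;
 *   int32_t  q15 = __SSAT(acc, 16);   // clamped to 32767 (signed 16-bit range)
 *   uint32_t u8  = __USAT(acc, 8);    // clamped to 255 (unsigned 8-bit range)
 */
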
/* ###########################  Core Function Access  ########################### */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}

/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE  void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}

/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return Floating Point Status/Control register value
*/
__STATIC_FORCEINLINE  uint32_t __get_FPSCR(void)
{
  #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
       (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
  #if __has_builtin(__builtin_arm_get_fpscr)
  // Re-enable using built-in when GCC has been fixed
  // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
    /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
    return __builtin_arm_get_fpscr();
  #else
    uint32_t result;

    __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
    return(result);
  #endif
  #else
    return(0U);
  #endif
}

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in] fpscr  Floating Point Status/Control value to set
*/
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
  #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
       (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
  #if __has_builtin(__builtin_arm_set_fpscr)
  // Re-enable using built-in when GCC has been fixed
  // || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
    /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
    __builtin_arm_set_fpscr(fpscr);
  #else
    __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
  #endif
  #else
    (void)fpscr;
  #endif
}

/** \brief  Get CPSR Register
    \return               CPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
  uint32_t result;
  __ASM volatile("MRS %0, cpsr" : "=r" (result) );
  return(result);
}

/** \brief  Set CPSR Register
    \param [in]    cpsr  CPSR value to set
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
}

/** \brief  Get Mode
    \return                Processor Mode
 */
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
    return (__get_CPSR() & 0x1FU);
}

/** \brief  Set Mode
    \param [in]    mode  Mode value to set
 */
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
  __ASM volatile("MSR  cpsr_c, %0" : : "r" (mode) : "memory");
}

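/*
 * Illustrative usage sketch: checking the current processor mode, e.g. to
 * detect whether code runs in IRQ mode. CPSR_M_IRQ is a hypothetical name
 * for the architectural IRQ mode encoding (0x12).
 *
 *   #define CPSR_M_IRQ  0x12U
 *
 *   if (__get_mode() == CPSR_M_IRQ) {
 *     // running in IRQ handler context
 *   }
 */
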
/** \brief  Get Stack Pointer
    \return Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
  uint32_t result;
  __ASM volatile("MOV  %0, sp" : "=r" (result) : : "memory");
  return result;
}

/** \brief  Set Stack Pointer
    \param [in]    stack  Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
  __ASM volatile("MOV  sp, %0" : : "r" (stack) : "memory");
}

/** \brief  Get USR/SYS Stack Pointer
    \return USR/SYS Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
  uint32_t cpsr = __get_CPSR();
  uint32_t result;
  __ASM volatile(
    "CPS     #0x1F  \n"
    "MOV     %0, sp   " : "=r"(result) : : "memory"
   );
  __set_CPSR(cpsr);
  __ISB();
  return result;
}

/** \brief  Set USR/SYS Stack Pointer
    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
  uint32_t cpsr = __get_CPSR();
  __ASM volatile(
    "CPS     #0x1F  \n"
    "MOV     sp, %0   " : : "r" (topOfProcStack) : "memory"
   );
  __set_CPSR(cpsr);
  __ISB();
}

/** \brief  Get FPEXC
    \return               Floating Point Exception Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
  uint32_t result;
  __ASM volatile("VMRS %0, fpexc" : "=r" (result) );
  return(result);
#else
  return(0);
#endif
}

/** \brief  Set FPEXC
    \param [in]    fpexc  Floating Point Exception Control value to set
 */
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
  __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}

/*
 * Include common core functions to access Coprocessor 15 registers
 */

#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm  : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm  : : "r" (Rt) : "memory" )

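/*
 * Illustrative usage sketch: reading a CP15 register with the accessor
 * macros above. MIDR (Main ID Register) is encoded as MRC p15, 0, <Rt>,
 * c0, c0, 0; the local variable name is hypothetical.
 *
 *   uint32_t midr;
 *   __get_CP(15, 0, midr, 0, 0, 0);   // expands to MRC p15, 0, %0, c0, c0, 0
 */
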
#include "ca/cmsis_cp15_ca.h"

/** \brief  Enable Floating Point Unit

  Critical section, called from undef handler, so systick is disabled
 */
__STATIC_INLINE void __FPU_Enable(void)
{
  __ASM volatile(
    //Permit access to VFP/NEON registers by modifying CPACR
    "        MRC     p15,0,R1,c1,c0,2  \n"
    "        ORR     R1,R1,#0x00F00000 \n"
    "        MCR     p15,0,R1,c1,c0,2  \n"

    //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
    "        ISB                       \n"

    //Enable VFP/NEON
    "        VMRS    R1,FPEXC          \n"
    "        ORR     R1,R1,#0x40000000 \n"
    "        VMSR    FPEXC,R1          \n"

    //Initialise VFP/NEON registers to 0
    "        MOV     R2,#0             \n"

    //Initialise D16 registers to 0
    "        VMOV    D0, R2,R2         \n"
    "        VMOV    D1, R2,R2         \n"
    "        VMOV    D2, R2,R2         \n"
    "        VMOV    D3, R2,R2         \n"
    "        VMOV    D4, R2,R2         \n"
    "        VMOV    D5, R2,R2         \n"
    "        VMOV    D6, R2,R2         \n"
    "        VMOV    D7, R2,R2         \n"
    "        VMOV    D8, R2,R2         \n"
    "        VMOV    D9, R2,R2         \n"
    "        VMOV    D10,R2,R2         \n"
    "        VMOV    D11,R2,R2         \n"
    "        VMOV    D12,R2,R2         \n"
    "        VMOV    D13,R2,R2         \n"
    "        VMOV    D14,R2,R2         \n"
    "        VMOV    D15,R2,R2         \n"

#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
    //Initialise D32 registers to 0
    "        VMOV    D16,R2,R2         \n"
    "        VMOV    D17,R2,R2         \n"
    "        VMOV    D18,R2,R2         \n"
    "        VMOV    D19,R2,R2         \n"
    "        VMOV    D20,R2,R2         \n"
    "        VMOV    D21,R2,R2         \n"
    "        VMOV    D22,R2,R2         \n"
    "        VMOV    D23,R2,R2         \n"
    "        VMOV    D24,R2,R2         \n"
    "        VMOV    D25,R2,R2         \n"
    "        VMOV    D26,R2,R2         \n"
    "        VMOV    D27,R2,R2         \n"
    "        VMOV    D28,R2,R2         \n"
    "        VMOV    D29,R2,R2         \n"
    "        VMOV    D30,R2,R2         \n"
    "        VMOV    D31,R2,R2         \n"
#endif

    //Initialise FPSCR to a known state
    "        VMRS    R1,FPSCR          \n"
    "        LDR     R2,=0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
    "        AND     R1,R1,R2          \n"
    "        VMSR    FPSCR,R1            "
    : : : "cc", "r1", "r2"
  );
}

/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return               APSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return               xPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}

#pragma GCC diagnostic pop

#endif /* __CMSIS_GCC_CA_H */