/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler GCC header file
 * @version  V5.3.0
 * @date     26. March 2020
 ******************************************************************************/
/*
 * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
  #define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines */
#ifndef   __ASM
  #define __ASM                                  __asm
#endif
#ifndef   __INLINE
  #define __INLINE                               inline
#endif
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE                        static inline
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE                   __attribute__((always_inline)) static inline
#endif
#ifndef   __NO_RETURN
  #define __NO_RETURN                            __attribute__((__noreturn__))
#endif
#ifndef   __USED
  #define __USED                                 __attribute__((used))
#endif
#ifndef   __WEAK
  #define __WEAK                                 __attribute__((weak))
#endif
#ifndef   __PACKED
  #define __PACKED                               __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_STRUCT
  #define __PACKED_STRUCT                        struct __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_UNION
  #define __PACKED_UNION                         union __attribute__((packed, aligned(1)))
#endif
#ifndef   __UNALIGNED_UINT32        /* deprecated */
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  struct __attribute__((packed)) T_UINT32 { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
#endif
#ifndef   __UNALIGNED_UINT16_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT16_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef   __UNALIGNED_UINT32_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT32_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  #pragma GCC diagnostic ignored "-Wattributes"
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
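/*
  Usage sketch (illustrative, not part of CMSIS): the packed-struct helpers
  above let you read or write a 32-bit value at a potentially unaligned
  address, e.g. a field inside a byte buffer received over a UART, without
  faulting on cores that lack hardware unaligned-access support:

    uint8_t  frame[7];                                    // hypothetical buffer
    uint32_t ts = __UNALIGNED_UINT32_READ(&frame[3]);     // unaligned load
    __UNALIGNED_UINT32_WRITE(&frame[3], ts + 1U);         // unaligned store
*/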
#ifndef   __ALIGNED
  #define __ALIGNED(x)                           __attribute__((aligned(x)))
#endif
#ifndef   __RESTRICT
  #define __RESTRICT                             __restrict
#endif
#ifndef   __COMPILER_BARRIER
  #define __COMPILER_BARRIER()                   __ASM volatile("":::"memory")
#endif
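/*
  Usage sketch (illustrative, not part of CMSIS): __COMPILER_BARRIER() emits
  no instruction; it only prevents the compiler from reordering memory
  accesses across it (use __DMB()/__DSB() below for hardware-level ordering).
  A typical use is keeping a data write ahead of the flag that publishes it
  to an interrupt handler on the same core:

    buffer_data  = compute();       // hypothetical shared variables
    __COMPILER_BARRIER();           // data must be written before the flag
    buffer_ready = 1U;
*/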

/* #########################  Startup and Lowlevel Init  ######################## */

#ifndef __PROGRAM_START

/**
  \brief   Initializes data and bss sections
  \details This default implementation initializes all data and additional bss
           sections, relying on .copy.table and .zero.table being specified
           properly in the used linker script.

 */
__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void)
{
  extern void _start(void) __NO_RETURN;

  typedef struct {
    uint32_t const* src;
    uint32_t* dest;
    uint32_t  wlen;
  } __copy_table_t;

  typedef struct {
    uint32_t* dest;
    uint32_t  wlen;
  } __zero_table_t;

  extern const __copy_table_t __copy_table_start__;
  extern const __copy_table_t __copy_table_end__;
  extern const __zero_table_t __zero_table_start__;
  extern const __zero_table_t __zero_table_end__;

  for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) {
    for(uint32_t i=0u; i<pTable->wlen; ++i) {
      pTable->dest[i] = pTable->src[i];
    }
  }

  for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) {
    for(uint32_t i=0u; i<pTable->wlen; ++i) {
      pTable->dest[i] = 0u;
    }
  }

  _start();
}

#define __PROGRAM_START           __cmsis_start
#endif
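/*
  Usage sketch (illustrative, not part of CMSIS): a device's reset handler
  typically expands __PROGRAM_START once the stack is set up, so .data is
  copied from flash and .bss is zeroed before the C runtime's _start() runs:

    void Reset_Handler(void)
    {
      SystemInit();        // device init hook, assumed to exist in the device support code
      __PROGRAM_START();   // -> __cmsis_start(): copy .data, zero .bss, call _start()
    }
*/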

#ifndef __INITIAL_SP
#define __INITIAL_SP              __StackTop
#endif

#ifndef __STACK_LIMIT
#define __STACK_LIMIT             __StackLimit
#endif

#ifndef __VECTOR_TABLE
#define __VECTOR_TABLE            __Vectors
#endif

#ifndef __VECTOR_TABLE_ATTRIBUTE
#define __VECTOR_TABLE_ATTRIBUTE  __attribute__((used, section(".vectors")))
#endif

/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}


/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}


/**
  \brief   Get Control Register
  \details Returns the content of the Control Register.
  \return               Control Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Control Register (non-secure)
  \details Returns the content of the non-secure Control Register when in secure mode.
  \return               non-secure Control Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Control Register
  \details Writes the given value to the Control Register.
  \param [in]    control  Control Register value to set
 */
__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Control Register (non-secure)
  \details Writes the given value to the non-secure Control Register when in secure state.
  \param [in]    control  Control Register value to set
 */
__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
{
  __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
}
#endif


/**
  \brief   Get IPSR Register
  \details Returns the content of the IPSR Register.
  \return               IPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return               APSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return               xPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get Process Stack Pointer
  \details Returns the current value of the Process Stack Pointer (PSP).
  \return               PSP Register value
 */
__STATIC_FORCEINLINE uint32_t __get_PSP(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, psp"  : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Process Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
  \return               PSP Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, psp_ns"  : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Process Stack Pointer
  \details Assigns the given value to the Process Stack Pointer (PSP).
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Process Stack Pointer (non-secure)
  \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
}
#endif


/**
  \brief   Get Main Stack Pointer
  \details Returns the current value of the Main Stack Pointer (MSP).
  \return               MSP Register value
 */
__STATIC_FORCEINLINE uint32_t __get_MSP(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, msp" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Main Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
  \return               MSP Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Main Stack Pointer
  \details Assigns the given value to the Main Stack Pointer (MSP).
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Main Stack Pointer (non-secure)
  \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
}
#endif


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
  \return               SP Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Stack Pointer (non-secure)
  \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
  \param [in]    topOfStack  Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
{
  __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
}
#endif


/**
  \brief   Get Priority Mask
  \details Returns the current state of the priority mask bit from the Priority Mask Register.
  \return               Priority Mask value
 */
__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Priority Mask (non-secure)
  \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
  \return               Priority Mask value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Priority Mask
  \details Assigns the given value to the Priority Mask Register.
  \param [in]    priMask  Priority Mask
 */
__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
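/*
  Usage sketch (illustrative, not part of CMSIS): a nesting-safe critical
  section saves PRIMASK, masks interrupts, and restores the saved state,
  so a context that already had interrupts disabled is not accidentally
  re-enabled on exit:

    uint32_t primask = __get_PRIMASK();
    __disable_irq();
    // ... access shared data ...
    __set_PRIMASK(primask);
*/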


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Priority Mask (non-secure)
  \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
  \param [in]    priMask  Priority Mask
 */
__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
{
  __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
}
#endif


#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}


/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}


/**
  \brief   Get Base Priority
  \details Returns the current value of the Base Priority register.
  \return               Base Priority register value
 */
__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Base Priority (non-secure)
  \details Returns the current value of the non-secure Base Priority register when in secure state.
  \return               Base Priority register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Base Priority (non-secure)
  \details Assigns the given value to the non-secure Base Priority register when in secure state.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
{
  __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
}
#endif


/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    basePri  Base Priority value to set
 */
__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
}

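/*
  Usage sketch (illustrative, not part of CMSIS): BASEPRI masks only
  interrupts at or below a given urgency, so time-critical handlers above
  that level keep running. The value must be shifted into the implemented
  priority bits; __NVIC_PRIO_BITS is assumed to come from the device header:

    uint32_t old = __get_BASEPRI();
    __set_BASEPRI_MAX(4U << (8U - __NVIC_PRIO_BITS));  // mask priority >= 4
    // ... section protected against lower-priority IRQs ...
    __set_BASEPRI(old);
*/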

/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return               Fault Mask register value
 */
__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Fault Mask (non-secure)
  \details Returns the current value of the non-secure Fault Mask register when in secure state.
  \return               Fault Mask register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
  return(result);
}
#endif


/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}


#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Set Fault Mask (non-secure)
  \details Assigns the given value to the non-secure Fault Mask register when in secure state.
  \param [in]    faultMask  Fault Mask value to set
 */
__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
}
#endif

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */


#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )

/**
  \brief   Get Process Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned in non-secure
  mode.

  \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
  \return               PSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
    // without main extensions, the non-secure PSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, psplim"  : "=r" (result) );
  return result;
#endif
}

#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
/**
  \brief   Get Process Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned.

  \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \return               PSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, psplim_ns"  : "=r" (result) );
  return result;
#endif
}
#endif


/**
  \brief   Set Process Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored in non-secure
  mode.

  \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  (void)ProcStackPtrLimit;
#else
  __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
#endif
}


#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
/**
  \brief   Set Process Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored.

  \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure PSPLIM is RAZ/WI
  (void)ProcStackPtrLimit;
#else
  __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
#endif
}
#endif


/**
  \brief   Get Main Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned in non-secure
  mode.

  \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
  \return               MSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, msplim" : "=r" (result) );
  return result;
#endif
}


#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
/**
  \brief   Get Main Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence zero is always returned.

  \details Returns the current value of the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
  \return               MSPLIM Register value
 */
__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  return 0U;
#else
  uint32_t result;
  __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
  return result;
#endif
}
#endif


/**
  \brief   Set Main Stack Pointer Limit
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored in non-secure
  mode.

  \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  (void)MainStackPtrLimit;
#else
  __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
#endif
}


#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
/**
  \brief   Set Main Stack Pointer Limit (non-secure)
  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
  Stack Pointer Limit register, hence the write is silently ignored.

  \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
 */
__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
  // without main extensions, the non-secure MSPLIM is RAZ/WI
  (void)MainStackPtrLimit;
#else
  __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
#endif
}
#endif

#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */


/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return               Floating Point Status/Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  return __builtin_arm_get_fpscr();
#else
  uint32_t result;

  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  return(result);
#endif
#else
  return(0U);
#endif
}


/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  __builtin_arm_set_fpscr(fpscr);
#else
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
  (void)fpscr;
#endif
}
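/*
  Usage sketch (illustrative, not part of CMSIS): polling and clearing the
  FPU's cumulative exception flags. The flag positions (IOC, DZC, OFC, UFC,
  IXC in FPSCR[4:0] and IDC in FPSCR[7]) are an assumption taken from the
  Arm architecture documentation, not something this header defines:

    uint32_t fpscr = __get_FPSCR();
    if ((fpscr & 0x0000009FU) != 0U)        // any cumulative exception recorded?
    {
      __set_FPSCR(fpscr & ~0x0000009FU);    // clear the sticky flags
    }
*/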


/*@} end of CMSIS_Core_RegAccFunctions */


/* ##########################  Core Instruction Access  ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_RW_REG(r) "+l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_RW_REG(r) "+r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
#define __NOP()                             __ASM volatile ("nop")

/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
#define __WFI()                             __ASM volatile ("wfi":::"memory")


/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
#define __WFE()                             __ASM volatile ("wfe":::"memory")


/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
#define __SEV()                             __ASM volatile ("sev")


/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}


/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}


/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
#endif
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (int16_t)__builtin_bswap16(value);
#else
  int16_t result;

  __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
#endif
}
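/*
  Usage sketch (illustrative, not part of CMSIS): converting a big-endian
  field from a network packet to the Cortex-M's little-endian representation
  with a single REV; pkt and the field offset are hypothetical:

    uint32_t be_len = __UNALIGNED_UINT32_READ(&pkt[4]);  // raw big-endian bytes
    uint32_t len    = __REV(be_len);                     // byte-swapped to host order
*/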


/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U)
  {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}


/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)


/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
   __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value != 0U; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}


/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
     __builtin_clz(0) is undefined behaviour, so handle this case specially.
     This guarantees ARM-compatible results if happening to compile on a non-ARM
     target, and ensures the compiler doesn't decide to activate any
     optimisations using the logic "value was passed to __builtin_clz, so it
     is non-zero".
     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
     single CLZ instruction.
   */
  if (value == 0U)
  {
    return 32U;
  }
  return __builtin_clz(value);
}
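/*
  Usage sketch (illustrative, not part of CMSIS): __CLZ gives floor(log2(x))
  and "round up to a power of two" in a couple of instructions:

    uint32_t log2_floor = 31U - __CLZ(x);                 // requires x != 0
    uint32_t pow2_ceil  = 1UL << (32U - __CLZ(x - 1U));   // for 1 < x <= 2^31
*/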


#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit values.
  \param [in]   addr  Pointer to data
  \return             value of type uint8_t at (*addr)
 */
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by the assembler, so the following less efficient pattern
       has to be used.
    */
   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]   addr  Pointer to data
  \return        value of type uint16_t at (*addr)
 */
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by the assembler, so the following less efficient pattern
       has to be used.
    */
   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]   addr  Pointer to data
  \return        value of type uint32_t at (*addr)
 */
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
    uint32_t result;

   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]   addr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]   addr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]   addr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
   uint32_t result;

   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}
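/*
  Usage sketch (illustrative, not part of CMSIS): a lock-free atomic
  increment built from the exclusive pair; the store fails (returns 1) if
  the exclusive monitor was cleared between LDREX and STREX, in which case
  the read-modify-write is retried:

    uint32_t atomic_inc(volatile uint32_t *p)
    {
      uint32_t v;
      do {
        v = __LDREXW(p) + 1U;     // exclusive load, then modify
      } while (__STREXW(v, p) != 0U);  // exclusive store, retry on failure
      return v;
    }
*/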

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */


#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  ARG1  Value to be saturated
  \param [in]  ARG2  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1, ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  ARG1  Value to be saturated
  \param [in]  ARG2  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1, ARG2) \
 __extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })


/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes an Unprivileged LDRT instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by the assembler, so the following less efficient pattern
       has to be used.
    */
   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes an Unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by the assembler, so the following less efficient pattern
       has to be used.
    */
   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes an Unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
   return(result);
}


/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes an Unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
{
   __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes an Unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
{
   __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes an Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
{
   __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
}

#else  /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */

/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]    val  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
  if ((sat >= 1U) && (sat <= 32U))
  {
    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
    const int32_t min = -1 - max;
    if (val > max)
    {
      return max;
    }
    else if (val < min)
    {
      return min;
    }
  }
  return val;
}

/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]    val  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
  if (sat <= 31U)
  {
    const uint32_t max = ((1U << sat) - 1U);
    if (val > (int32_t)max)
    {
      return max;
    }
    else if (val < 0)
    {
      return 0U;
    }
  }
  return (uint32_t)val;
}
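/*
  Usage sketch (illustrative, not part of CMSIS): clamping a wide DSP
  accumulator into a 16-bit PCM sample, a typical use of signed saturation;
  gain and sample_in are hypothetical fixed-point values:

    int32_t acc = (gain * sample_in) >> 8;     // intermediate may overflow int16_t
    int16_t out = (int16_t)__SSAT(acc, 16);    // clamp to [-32768, 32767]
*/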

#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */


#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
/**
  \brief   Load-Acquire (8 bit)
  \details Executes a LDAB instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint8_t) result);
}


/**
  \brief   Load-Acquire (16 bit)
  \details Executes a LDAH instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint16_t) result);
}


/**
  \brief   Load-Acquire (32 bit)
  \details Executes a LDA instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return(result);
}


/**
  \brief   Store-Release (8 bit)
  \details Executes a STLB instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
{
   __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (16 bit)
  \details Executes a STLH instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
{
   __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Store-Release (32 bit)
  \details Executes a STL instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
{
   __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
}


/**
  \brief   Load-Acquire Exclusive (8 bit)
  \details Executes a LDAB exclusive instruction for 8 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint8_t) result);
}


/**
  \brief   Load-Acquire Exclusive (16 bit)
  \details Executes a LDAH exclusive instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return ((uint16_t) result);
}


/**
  \brief   Load-Acquire Exclusive (32 bit)
  \details Executes a LDA exclusive instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
{
    uint32_t result;

   __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
   return(result);
}


/**
  \brief   Store-Release Exclusive (8 bit)
  \details Executes a STLB exclusive instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
{
   uint32_t result;

   __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
   return(result);
}


/**
  \brief   Store-Release Exclusive (16 bit)
  \details Executes a STLH exclusive instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
{
   uint32_t result;

   __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
   return(result);
}


/**
  \brief   Store-Release Exclusive (32 bit)
  \details Executes a STL exclusive instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
{
   uint32_t result;

   __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
   return(result);
}
1607 
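/* Illustrative sketch (not part of the CMSIS API): the load-acquire /
   store-release exclusive pair above is typically used in a retry loop to
   build lock-free read-modify-write operations.  The helper name is
   hypothetical. */
__STATIC_FORCEINLINE uint32_t example_atomic_add(volatile uint32_t *addr, uint32_t inc)
{
  uint32_t newval;
  do {
    newval = __LDAEX(addr) + inc;          /* exclusive load, acquire semantics */
  } while (__STLEX(newval, addr) != 0U);   /* 0 = stored, 1 = lost exclusivity, retry */
  return newval;
}
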
#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))

/* Packed 8-bit additions on four byte lanes: __SADD8/__UADD8 set the
   APSR.GE flags per lane, __QADD8/__UQADD8 saturate, and
   __SHADD8/__UHADD8 halve the results. */
__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


/* Packed 8-bit subtractions on four byte lanes, with the same
   GE-setting / saturating / halving variants. */
__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

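/* Illustrative sketch (not part of the CMSIS API): the packed 8-bit
   intrinsics process four bytes per 32-bit word.  Here __UQADD8 brightens
   four 8-bit pixels at once, saturating each lane at 0xFF instead of
   wrapping.  The helper name is hypothetical. */
__STATIC_FORCEINLINE uint32_t example_brighten4(uint32_t four_pixels, uint8_t amount)
{
  uint32_t amount4 = (uint32_t)amount * 0x01010101UL;  /* replicate into all four lanes */
  return __UQADD8(four_pixels, amount4);               /* per-byte saturating add */
}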

/* Packed 16-bit additions and subtractions on two halfword lanes, with the
   same GE-setting / saturating / halving variants as the 8-bit family. */
__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Packed 16-bit add-and-subtract with exchange:
   result[31:16] = op1[31:16] + op2[15:0],
   result[15:0]  = op1[15:0]  - op2[31:16]
   (plus saturating and halving variants). */
__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Packed 16-bit subtract-and-add with exchange:
   result[31:16] = op1[31:16] - op2[15:0],
   result[15:0]  = op1[15:0]  + op2[31:16]. */
__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Unsigned sum of absolute differences of the four byte lanes;
   __USADA8 additionally accumulates the sum into op3. */
__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

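/* Illustrative sketch (not part of the CMSIS API): accumulating a sum of
   absolute byte differences over two buffers, four bytes per iteration, as
   used in motion estimation.  Assumes word-aligned buffers holding 4*len4
   bytes; all names are hypothetical. */
__STATIC_FORCEINLINE uint32_t example_sad(const uint32_t *a, const uint32_t *b, uint32_t len4)
{
  uint32_t acc = 0U;
  for (uint32_t i = 0U; i < len4; i++)
  {
    acc = __USADA8(a[i], b[i], acc);   /* acc += sum of |a byte n - b byte n| */
  }
  return acc;
}
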
/* Saturate both halfwords of ARG1 to a signed (__SSAT16) or unsigned
   (__USAT16) range of ARG2 bits.  ARG2 binds to an "I" immediate
   constraint, so it must be a compile-time constant. */
#define __SSAT16(ARG1, ARG2) \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

#define __USAT16(ARG1, ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

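/* Illustrative sketch (not part of the CMSIS API): clamping two packed
   16-bit lanes to 9 bits in one instruction.  The bit count must be a
   literal constant at the call site; the helper name is hypothetical. */
__STATIC_FORCEINLINE uint32_t example_ssat9_x2(uint32_t packed2)
{
  return (uint32_t)__SSAT16(packed2, 9);   /* each halfword clamped to [-256, 255] */
}
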
/* Extract two bytes (bits 7:0 and 23:16) and zero- (UXT*) or sign-extend
   (SXT*) them to halfwords; the *AB16 forms add op2's extended bytes to
   op1, and __SXTB16_RORn rotates op1 before extracting. */
__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SXTB16(uint32_t op1)
{
  int32_t result;

  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__STATIC_FORCEINLINE int32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate)
{
  int32_t result;

  /* The "i" constraint requires rotate to be a compile-time constant
     (8, 16 or 24). */
  __ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) );

  return result;
}

__STATIC_FORCEINLINE int32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  int32_t result;

  __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* Dual signed 16-bit multiplies: __SMUAD adds the two products, the ...X
   forms exchange the halfwords of op2 first, and __SMLAD/__SMLADX also add
   the accumulator op3. */
__STATIC_FORCEINLINE int32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  int32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  int32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  int32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  int32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

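/* Illustrative sketch (not part of the CMSIS API): a Q15 dot product that
   consumes two 16-bit samples per 32-bit load and a single __SMLAD per
   iteration.  Assumes word-aligned buffers holding 2*n2 samples; all names
   are hypothetical. */
__STATIC_FORCEINLINE int32_t example_dot_q15(const uint32_t *x, const uint32_t *y, uint32_t n2)
{
  int32_t acc = 0;
  for (uint32_t i = 0U; i < n2; i++)
  {
    acc = __SMLAD(x[i], y[i], (uint32_t)acc);   /* acc += x.lo*y.lo + x.hi*y.hi */
  }
  return acc;
}
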
/* 64-bit accumulating duals: __SMLALD/__SMLALDX add both 16-bit products
   to a 64-bit accumulator.  The union splits the accumulator across the
   two 32-bit registers the instruction expects, with the halves swapped on
   big-endian targets. */
__STATIC_FORCEINLINE int64_t __SMLALD (uint32_t op1, uint32_t op2, int64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    int64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__STATIC_FORCEINLINE int64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    int64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* Dual signed 16-bit multiplies with subtraction: the product of the top
   halfwords is subtracted from the product of the bottom halfwords, with
   exchanging (...X), accumulating (__SMLSD*) and 64-bit accumulating
   (__SMLSLD*) variants. */
__STATIC_FORCEINLINE int32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  int32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  int32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  int32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE int32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  int32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__STATIC_FORCEINLINE int64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    int64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__STATIC_FORCEINLINE int64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    int64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

/* Byte-wise select: each result byte comes from op1 where the
   corresponding APSR.GE flag is set (by a preceding GE-setting packed
   operation) and from op2 otherwise. */
__STATIC_FORCEINLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

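/* Illustrative sketch (not part of the CMSIS API): per-byte unsigned
   maximum.  __USUB8 sets the APSR.GE flags lane by lane; __SEL then picks
   each byte from x where x >= y and from y otherwise.  The helper name is
   hypothetical. */
__STATIC_FORCEINLINE uint32_t example_umax8(uint32_t x, uint32_t y)
{
  (void)__USUB8(x, y);   /* result discarded; GE[n] = (x byte n >= y byte n) */
  return __SEL(x, y);    /* byte-wise select driven by the GE flags */
}
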
/* 32-bit saturating addition and subtraction; on saturation the result
   clamps to INT32_MAX/INT32_MIN and the Q flag is set. */
__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

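/* Illustrative sketch (not part of the CMSIS API): a saturating Q31
   accumulator.  Unlike plain addition, __QADD clamps instead of wrapping,
   so overflow cannot silently corrupt the running sum. */
__STATIC_FORCEINLINE int32_t example_sat_acc(int32_t acc, int32_t sample)
{
  return __QADD(acc, sample);   /* e.g. __QADD(0x7FFFFFFF, 1) == 0x7FFFFFFF */
}
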
#if 0 /* inline-assembly forms, superseded by the C macros below */
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })
#endif

/* Pack halfwords: __PKHBT keeps the bottom halfword of ARG1 and inserts
   ARG2 << ARG3 as the top halfword; __PKHTB keeps the top halfword of ARG1
   and inserts ARG2 >> ARG3 as the bottom halfword. */
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

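/* Illustrative sketch (not part of the CMSIS API): packing two signed
   16-bit samples into one word for the packed intrinsics above; with a
   shift of 16, __PKHBT places lo in the bottom halfword and hi in the top.
   The helper name is hypothetical. */
__STATIC_FORCEINLINE uint32_t example_pack_q15x2(int16_t lo, int16_t hi)
{
  return __PKHBT((uint32_t)(uint16_t)lo, (uint32_t)(uint16_t)hi, 16);
}
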
/* Signed most-significant-word multiply accumulate:
   result = op3 + (int32_t)(((int64_t)op1 * (int64_t)op2) >> 32). */
__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
 int32_t result;

 __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
 return(result);
}

#endif /* (__ARM_FEATURE_DSP == 1) */
/*@} end of group CMSIS_SIMD_intrinsics */


#pragma GCC diagnostic pop

#endif /* __CMSIS_GCC_H */