/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LOS_COMPILER_H
#define _LOS_COMPILER_H

/* for IAR Compiler */
#ifdef __ICCARM__
#include "iccarm_builtin.h"
#endif

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/* for IAR Compiler */
#ifdef __ICCARM__

#ifndef ASM
#define ASM           __asm
#endif

#ifndef INLINE
#define INLINE        inline
#endif

#ifndef STATIC_INLINE
#define STATIC_INLINE static inline
#endif

#ifndef USED
#define USED          __root
#endif

#ifndef WEAK
#define WEAK          __weak
#endif

#ifndef CLZ
#define CLZ           __iar_builtin_CLZ
#endif

#ifndef NORETURN
#define NORETURN      __attribute__ ((__noreturn__))
#endif

#ifndef UNREACHABLE
#define UNREACHABLE   while (1)
#endif

/* for ARM Compiler */
#elif defined(__CC_ARM)

#ifndef ASM
#define ASM           __asm
#endif

#ifndef INLINE
#define INLINE        __inline
#endif

#ifndef STATIC_INLINE
#define STATIC_INLINE static __inline
#endif

#ifndef USED
#define USED          __attribute__((used))
#endif

#ifndef WEAK
#define WEAK          __attribute__((weak))
#endif

#ifndef CLZ
#define CLZ           __clz
#endif

#ifndef NORETURN
#define NORETURN      __declspec(noreturn)
#endif

#ifndef UNREACHABLE
#define UNREACHABLE   while (1)
#endif

#pragma anon_unions

/* for GNU Compiler */
#elif defined(__GNUC__)

#ifndef ASM
#define ASM           __asm
#endif

#ifndef INLINE
#define INLINE        inline
#endif

#ifndef STATIC_INLINE
#define STATIC_INLINE static inline
#endif

#ifndef USED
#define USED          __attribute__((used))
#endif

#ifndef WEAK
#define WEAK          __attribute__((weak))
#endif

#ifndef CLZ
#define CLZ           __builtin_clz
#endif

#ifndef NORETURN
#define NORETURN      __attribute__ ((__noreturn__))
#endif

#ifndef UNREACHABLE
#define UNREACHABLE   __builtin_unreachable()
#endif

#else
#error Unknown compiler.
#endif

#ifndef STATIC
#define STATIC       static
#endif
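
/*
 * Example (illustrative sketch, not part of the original header): the macros
 * above give portable spellings for compiler-specific keywords and intrinsics,
 * so common code can be written once for IAR, ARMCC, and GCC. The function
 * name HighestBitSet is hypothetical.
 *
 *     STATIC_INLINE UINT32 HighestBitSet(UINT32 value)
 *     {
 *         // CLZ(0) is undefined for GCC's __builtin_clz, so guard zero.
 *         return (value == 0) ? 32 : (31 - CLZ(value));
 *     }
 */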

/**
 * @ingroup los_builddef
 * Define inline keyword
 */
#ifndef INLINE
#define INLINE                                              static inline
#endif

/**
 * @ingroup los_builddef
 * Little endian
 */
#define OS_LITTLE_ENDIAN                                    0x1234

/**
 * @ingroup los_builddef
 * Big endian
 */
#define OS_BIG_ENDIAN                                       0x4321

/**
 * @ingroup los_builddef
 * Byte order
 */
#ifndef OS_BYTE_ORDER
#define OS_BYTE_ORDER                                       OS_LITTLE_ENDIAN
#endif
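
/*
 * Example (illustrative): OS_BYTE_ORDER lets code select a byte-order-specific
 * path at compile time. OsHtons is a hypothetical name, not part of this header.
 *
 *     #if (OS_BYTE_ORDER == OS_BIG_ENDIAN)
 *     #define OsHtons(x) (x)
 *     #else
 *     #define OsHtons(x) ((UINT16)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)))
 *     #endif
 */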

/* OS code/data section placement macros; LITE_OS_SEC_ALW_INLINE below marks
   functions that should always be inlined. */

/**
 * @ingroup los_builddef
 * Always-inline attribute for functions
 */
#ifndef LITE_OS_SEC_ALW_INLINE
#define LITE_OS_SEC_ALW_INLINE      // __attribute__((always_inline))
#endif

/**
 * @ingroup los_builddef
 * Vector table section
 */
#ifndef LITE_OS_SEC_VEC
#define LITE_OS_SEC_VEC          __attribute__ ((section(".vector")))
#endif

/**
 * @ingroup los_builddef
 * .Text section (Code section)
 */
#ifndef LITE_OS_SEC_TEXT
#define LITE_OS_SEC_TEXT            // __attribute__((section(".sram.text")))
#endif

/**
 * @ingroup los_builddef
 * .Text.ddr section
 */
#ifndef LITE_OS_SEC_TEXT_MINOR
#define LITE_OS_SEC_TEXT_MINOR      // __attribute__((section(".dyn.text")))
#endif

/**
 * @ingroup los_builddef
 * .Text.init section
 */
#ifndef LITE_OS_SEC_TEXT_INIT
#define LITE_OS_SEC_TEXT_INIT       // __attribute__((section(".dyn.text")))
#endif

/**
 * @ingroup los_builddef
 * .Data section
 */
#ifndef LITE_OS_SEC_DATA
#define LITE_OS_SEC_DATA            // __attribute__((section(".dyn.data")))
#endif

/**
 * @ingroup los_builddef
 * .Data.init section
 */
#ifndef LITE_OS_SEC_DATA_INIT
#define LITE_OS_SEC_DATA_INIT       // __attribute__((section(".dyn.data")))
#endif

/**
 * @ingroup los_builddef
 * Uninitialized data (.bss) section
 */
#ifndef LITE_OS_SEC_BSS
#define LITE_OS_SEC_BSS             // __attribute__((section(".sym.bss")))
#endif

/**
 * @ingroup los_builddef
 * .bss.ddr section
 */
#ifndef LITE_OS_SEC_BSS_MINOR
#define LITE_OS_SEC_BSS_MINOR
#endif

/**
 * @ingroup los_builddef
 * .bss.init sections
 */
#ifndef LITE_OS_SEC_BSS_INIT
#define LITE_OS_SEC_BSS_INIT
#endif

#ifndef LITE_OS_SEC_TEXT_DATA
#define LITE_OS_SEC_TEXT_DATA       // __attribute__((section(".dyn.data")))
#define LITE_OS_SEC_TEXT_BSS        // __attribute__((section(".dyn.bss")))
#define LITE_OS_SEC_TEXT_RODATA     // __attribute__((section(".dyn.rodata")))
#endif

#ifndef LITE_OS_SEC_SYMDATA
#define LITE_OS_SEC_SYMDATA         // __attribute__((section(".sym.data")))
#endif

#ifndef LITE_OS_SEC_SYMBSS
#define LITE_OS_SEC_SYMBSS          // __attribute__((section(".sym.bss")))
#endif

#ifndef LITE_OS_SEC_KEEP_DATA_DDR
#define LITE_OS_SEC_KEEP_DATA_DDR   // __attribute__((section(".keep.data.ddr")))
#endif

#ifndef LITE_OS_SEC_KEEP_TEXT_DDR
#define LITE_OS_SEC_KEEP_TEXT_DDR   // __attribute__((section(".keep.text.ddr")))
#endif

#ifndef LITE_OS_SEC_KEEP_DATA_SRAM
#define LITE_OS_SEC_KEEP_DATA_SRAM  // __attribute__((section(".keep.data.sram")))
#endif

#ifndef LITE_OS_SEC_KEEP_TEXT_SRAM
#define LITE_OS_SEC_KEEP_TEXT_SRAM  // __attribute__((section(".keep.text.sram")))
#endif
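/*
 * Example (illustrative sketch): a platform can enable real placement by
 * defining a section macro before this header is included; declarations then
 * carry the attribute. OsBoardInit is a hypothetical name.
 *
 *     #define LITE_OS_SEC_TEXT_INIT __attribute__((section(".dyn.text")))
 *     LITE_OS_SEC_TEXT_INIT VOID OsBoardInit(VOID);
 */
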
/* type definitions */
typedef unsigned char          UINT8;
typedef unsigned short         UINT16;
typedef unsigned int           UINT32;
typedef signed char            INT8;
typedef signed short           INT16;
typedef signed int             INT32;
typedef float                  FLOAT;
typedef double                 DOUBLE;
typedef char                   CHAR;

typedef unsigned long long     UINT64;
typedef signed long long       INT64;
/* pointer-sized integers; unsigned int is wide enough on the 32-bit targets this kernel supports */
typedef unsigned int           UINTPTR;
typedef signed int             INTPTR;

typedef volatile INT32 Atomic;
typedef volatile INT64 Atomic64;

#ifndef DEFINED_BOOL
typedef unsigned int           BOOL;
#define DEFINED_BOOL
#endif

#ifndef VOID
#define VOID          void
#endif

#ifndef FALSE
#define FALSE         ((BOOL)0)
#endif

#ifndef TRUE
#define TRUE          ((BOOL)1)
#endif

#ifndef NULL
#ifdef __cplusplus
#define NULL          0L
#else
#define NULL          ((void*)0)
#endif
#endif

#define OS_NULL_BYTE  ((UINT8)0xFF)
#define OS_NULL_SHORT ((UINT16)0xFFFF)
#define OS_NULL_INT   ((UINT32)0xFFFFFFFF)

#ifndef LOS_OK
#define LOS_OK        0U
#endif

#ifndef LOS_NOK
#define LOS_NOK       (UINT32)(-1)
#endif

#define OS_FAIL       1
#define OS_ERROR      (UINT32)(-1)
#define OS_INVALID    (UINT32)(-1)
#define OS_64BIT_MAX  (0xFFFFFFFFFFFFFFFFULL)

#define asm           __asm
#ifdef typeof
#undef typeof
#endif
#define typeof        __typeof__

#define SIZE(a) (a)

#define LOS_ASSERT_COND(expression)

/**
 * @ingroup los_base
 * Round the address addr up to the next multiple of boundary bytes
 * (boundary must be a power of two).
 */
#ifndef ALIGN
#define ALIGN(addr, boundary)        LOS_Align(addr, boundary)
#endif
/**
 * @ingroup los_base
 * Round the address addr down to a multiple of size bytes (size must be a power of two).
 */
#define TRUNCATE(addr, size)         ((addr) & ~((size) - 1))


/**
 * @ingroup los_base
 * @brief Align a value (addr) upwards to the byte boundary (boundary) you specify.
 *
 * @par Description:
 * This API is used to align the value (addr) up to the specified boundary.
 *
 * @attention
 * <ul>
 * <li>boundary must be a power of two; typical values are 4, 8, 16, and 32.</li>
 * </ul>
 *
 * @param addr     [IN]  The value to be aligned.
 * @param boundary [IN]  The alignment boundary, in bytes.
 *
 * @retval #UINT32 The aligned value.
 * @par Dependency:
 * <ul><li>los_base.h: the header file that contains the API declaration.</li></ul>
 * @see
 */
static inline UINT32 LOS_Align(UINT32 addr, UINT32 boundary)
{
    /* The comparison guards against overflow: if addr + (boundary - 1) would
       wrap around, the value is aligned downwards instead of wrapping. */
    return (addr + (((addr + (boundary - 1)) > addr) ? (boundary - 1) : 0)) & ~(boundary - 1);
}
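
/*
 * Example (illustrative): with a 4-byte boundary,
 *     LOS_Align(0x1001, 4) == 0x1004   (rounded up)
 *     TRUNCATE(0x1007, 4)  == 0x1004   (rounded down)
 */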

#define OS_GOTO_ERREND() \
        do {                 \
            goto LOS_ERREND; \
        } while (0)
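
/*
 * Example (illustrative): OS_GOTO_ERREND() assumes the calling function defines
 * a LOS_ERREND label for centralized error handling:
 *
 *     if (ptr == NULL) {
 *         OS_GOTO_ERREND();
 *     }
 *     ...
 * LOS_ERREND:
 *     return LOS_NOK;
 */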

#ifndef UNUSED
#define UNUSED(X) (void)X
#endif

#if defined(__GNUC__)
#ifndef __XTENSA_LX6__
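/*
 * The helpers below emulate 64-bit atomic operations with spinlocks for 32-bit
 * targets that lack native 64-bit atomic instructions, following the approach
 * of GCC's libatomic: a hashed table of lock flags guards each accessed
 * address, and explicit fences uphold the caller's requested memory model.
 */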
/* Issue the release-side fence the memory model requires before taking the lock. */
static inline void maybe_release_fence(int model)
{
    switch (model) {
        case __ATOMIC_RELEASE:
            __atomic_thread_fence(__ATOMIC_RELEASE);
            break;
        case __ATOMIC_ACQ_REL:
            __atomic_thread_fence(__ATOMIC_ACQ_REL);
            break;
        case __ATOMIC_SEQ_CST:
            __atomic_thread_fence(__ATOMIC_SEQ_CST);
            break;
        default:
            break;
    }
}

/* Issue the acquire-side fence the memory model requires after releasing the lock. */
static inline void maybe_acquire_fence(int model)
{
    switch (model) {
        case __ATOMIC_ACQUIRE:
            __atomic_thread_fence(__ATOMIC_ACQUIRE);
            break;
        case __ATOMIC_ACQ_REL:
            __atomic_thread_fence(__ATOMIC_ACQ_REL);
            break;
        case __ATOMIC_SEQ_CST:
            __atomic_thread_fence(__ATOMIC_SEQ_CST);
            break;
        default:
            break;
    }
}

#define __LIBATOMIC_N_LOCKS (1 << 4) /* 16 lock flags */
static inline BOOL *__libatomic_flag_for_address(void *addr)
{
    static BOOL flag_table[__LIBATOMIC_N_LOCKS] = {0};
    UINTPTR p = (UINTPTR)addr;

    /* Mix the address bits so nearby addresses spread across different locks. */
    p += (p >> 2) + (p << 4);
    p += (p >> 7) + (p << 5);
    p += (p >> 17) + (p << 13);

    if (sizeof(void *) > 4) { /* pointers wider than 4 bytes: fold in the high bits */
        p += (p >> 31);
    }

    p &= (__LIBATOMIC_N_LOCKS - 1);
    return flag_table + p;
}

/* Spin until the lock flag for this address is acquired. */
static inline void get_lock(void *addr, int model)
{
    BOOL *lock_ptr = __libatomic_flag_for_address(addr);

    maybe_release_fence(model);
    while (__atomic_test_and_set(lock_ptr, __ATOMIC_ACQUIRE) == 1) {
        ;
    }
}

static inline void free_lock(void *addr, int model)
{
    BOOL *lock_ptr = __libatomic_flag_for_address(addr);

    __atomic_clear(lock_ptr, __ATOMIC_RELEASE);
    maybe_acquire_fence(model);
}

static inline UINT64 __atomic_load_8(const volatile void *mem, int model)
{
    UINT64 ret;

    void *memP = (void *)mem;
    get_lock(memP, model);
    ret = *(UINT64 *)mem;
    free_lock(memP, model);
    return ret;
}

static inline void __atomic_store_8(volatile void *mem, UINT64 val, int model)
{
    void *memP = (void *)mem;
    get_lock(memP, model);
    *(UINT64 *)mem = val;
    free_lock(memP, model);
}

static inline UINT64 __atomic_exchange_8(volatile void *mem, UINT64 val, int model)
{
    UINT64 ret;

    void *memP = (void *)mem;
    get_lock(memP, model);
    ret = *(UINT64 *)mem;
    *(UINT64 *)mem = val;
    free_lock(memP, model);
    return ret;
}
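
/*
 * Example (illustrative): on a 32-bit target without native 64-bit atomics,
 * GCC lowers 64-bit __atomic_* builtins to library calls such as
 * __atomic_load_8, which the helpers above satisfy:
 *
 *     Atomic64 counter = 0;
 *     UINT64 v = __atomic_load_n(&counter, __ATOMIC_SEQ_CST); // -> __atomic_load_8
 */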
#endif
#endif

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _LOS_COMPILER_H */