/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LOS_COMPILER_H
#define _LOS_COMPILER_H

/* for IAR Compiler */
#ifdef __ICCARM__
#include "iccarm_builtin.h"
#endif

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/* for IAR Compiler */
#ifdef __ICCARM__

#ifndef ASM
#define ASM __asm
#endif

#ifndef INLINE
#define INLINE inline
#endif

#ifndef STATIC_INLINE
#define STATIC_INLINE static inline
#endif

#ifndef USED
#define USED __root
#endif

#ifndef WEAK
#define WEAK __weak
#endif

#ifndef CLZ
#define CLZ __iar_builtin_CLZ
#endif

#ifndef NORETURN
#define NORETURN __attribute__((__noreturn__))
#endif

#ifndef UNREACHABLE
#define UNREACHABLE while (1)
#endif

/* for ARM Compiler */
#elif defined(__CC_ARM)

#ifndef ASM
#define ASM __asm
#endif

#ifndef INLINE
#define INLINE __inline
#endif

#ifndef STATIC_INLINE
#define STATIC_INLINE static __inline
#endif

#ifndef USED
#define USED __attribute__((used))
#endif

#ifndef WEAK
#define WEAK __attribute__((weak))
#endif

#ifndef CLZ
#define CLZ __clz
#endif

#ifndef NORETURN
#define NORETURN __declspec(noreturn)
#endif

#ifndef UNREACHABLE
#define UNREACHABLE while (1)
#endif

#pragma anon_unions

/* for GNU Compiler */
#elif defined(__GNUC__)

#ifndef ASM
#define ASM __asm
#endif

#ifndef INLINE
#define INLINE inline
#endif

#ifndef STATIC_INLINE
#define STATIC_INLINE static inline
#endif

#ifndef USED
#define USED __attribute__((used))
#endif

#ifndef WEAK
#define WEAK __attribute__((weak))
#endif

#ifndef CLZ
#define CLZ __builtin_clz
#endif

#ifndef NORETURN
#define NORETURN __attribute__((__noreturn__))
#endif

#ifndef UNREACHABLE
#define UNREACHABLE __builtin_unreachable()
#endif

#else
#error Unknown compiler.
#endif

#ifndef STATIC
#define STATIC static
#endif

/**
 * @ingroup los_builddef
 * Define the inline keyword
 */
#ifndef INLINE
#define INLINE static inline
#endif

/**
 * @ingroup los_builddef
 * Little endian
 */
#define OS_LITTLE_ENDIAN 0x1234

/**
 * @ingroup los_builddef
 * Big endian
 */
#define OS_BIG_ENDIAN 0x4321

/**
 * @ingroup los_builddef
 * Byte order
 */
#ifndef OS_BYTE_ORDER
#define OS_BYTE_ORDER OS_LITTLE_ENDIAN
#endif
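
/*
 * A minimal usage sketch: OS_BYTE_ORDER lets code select an endian-specific
 * path at compile time instead of probing at runtime. OS_HTONS below is a
 * hypothetical host-to-network (big-endian) helper, named for illustration only:
 *
 *   #if OS_BYTE_ORDER == OS_BIG_ENDIAN
 *   #define OS_HTONS(x) (x)
 *   #else
 *   #define OS_HTONS(x) ((UINT16)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)))
 *   #endif
 */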

/* Define the OS code and data sections */
/* The indicated function is inlined */

/**
 * @ingroup los_builddef
 * Always-inline attribute
 */
#ifndef LITE_OS_SEC_ALW_INLINE
#define LITE_OS_SEC_ALW_INLINE // __attribute__((always_inline))
#endif

/**
 * @ingroup los_builddef
 * Vector table section
 */
#ifndef LITE_OS_SEC_VEC
#define LITE_OS_SEC_VEC __attribute__((section(".vector")))
#endif

/**
 * @ingroup los_builddef
 * .text section (code section)
 */
#ifndef LITE_OS_SEC_TEXT
#define LITE_OS_SEC_TEXT // __attribute__((section(".sram.text")))
#endif

/**
 * @ingroup los_builddef
 * .text.ddr section
 */
#ifndef LITE_OS_SEC_TEXT_MINOR
#define LITE_OS_SEC_TEXT_MINOR // __attribute__((section(".dyn.text")))
#endif

/**
 * @ingroup los_builddef
 * .text.init section
 */
#ifndef LITE_OS_SEC_TEXT_INIT
#define LITE_OS_SEC_TEXT_INIT // __attribute__((section(".dyn.text")))
#endif

/**
 * @ingroup los_builddef
 * .data section
 */
#ifndef LITE_OS_SEC_DATA
#define LITE_OS_SEC_DATA // __attribute__((section(".dyn.data")))
#endif

/**
 * @ingroup los_builddef
 * .data.init section
 */
#ifndef LITE_OS_SEC_DATA_INIT
#define LITE_OS_SEC_DATA_INIT // __attribute__((section(".dyn.data")))
#endif

/**
 * @ingroup los_builddef
 * .bss section (uninitialized variables)
 */
#ifndef LITE_OS_SEC_BSS
#define LITE_OS_SEC_BSS // __attribute__((section(".sym.bss")))
#endif

/**
 * @ingroup los_builddef
 * .bss.ddr section
 */
#ifndef LITE_OS_SEC_BSS_MINOR
#define LITE_OS_SEC_BSS_MINOR
#endif

/**
 * @ingroup los_builddef
 * .bss.init section
 */
#ifndef LITE_OS_SEC_BSS_INIT
#define LITE_OS_SEC_BSS_INIT
#endif

#ifndef LITE_OS_SEC_TEXT_DATA
#define LITE_OS_SEC_TEXT_DATA   // __attribute__((section(".dyn.data")))
#define LITE_OS_SEC_TEXT_BSS    // __attribute__((section(".dyn.bss")))
#define LITE_OS_SEC_TEXT_RODATA // __attribute__((section(".dyn.rodata")))
#endif

#ifndef LITE_OS_SEC_SYMDATA
#define LITE_OS_SEC_SYMDATA // __attribute__((section(".sym.data")))
#endif

#ifndef LITE_OS_SEC_SYMBSS
#define LITE_OS_SEC_SYMBSS // __attribute__((section(".sym.bss")))
#endif

#ifndef LITE_OS_SEC_KEEP_DATA_DDR
#define LITE_OS_SEC_KEEP_DATA_DDR // __attribute__((section(".keep.data.ddr")))
#endif

#ifndef LITE_OS_SEC_KEEP_TEXT_DDR
#define LITE_OS_SEC_KEEP_TEXT_DDR // __attribute__((section(".keep.text.ddr")))
#endif

#ifndef LITE_OS_SEC_KEEP_DATA_SRAM
#define LITE_OS_SEC_KEEP_DATA_SRAM // __attribute__((section(".keep.data.sram")))
#endif

#ifndef LITE_OS_SEC_KEEP_TEXT_SRAM
#define LITE_OS_SEC_KEEP_TEXT_SRAM // __attribute__((section(".keep.text.sram")))
#endif

/* type definitions */
typedef unsigned char      UINT8;
typedef unsigned short     UINT16;
typedef unsigned int       UINT32;
typedef signed char        INT8;
typedef signed short       INT16;
typedef signed int         INT32;
typedef float              FLOAT;
typedef double             DOUBLE;
typedef char               CHAR;

typedef unsigned int       BOOL;
typedef unsigned long long UINT64;
typedef signed long long   INT64;
typedef unsigned int       UINTPTR;
typedef signed int         INTPTR;

typedef volatile INT32 Atomic;
typedef volatile INT64 Atomic64;

#define VOID void

#ifndef FALSE
#define FALSE ((BOOL)0)
#endif

#ifndef TRUE
#define TRUE ((BOOL)1)
#endif

#ifndef NULL
#ifdef __cplusplus
#define NULL 0L
#else
#define NULL ((void *)0)
#endif
#endif

#define OS_NULL_BYTE  ((UINT8)0xFF)
#define OS_NULL_SHORT ((UINT16)0xFFFF)
#define OS_NULL_INT   ((UINT32)0xFFFFFFFF)

#ifndef LOS_OK
#define LOS_OK 0U
#endif

#ifndef LOS_NOK
#define LOS_NOK (UINT32)(-1)
#endif

#define OS_FAIL      1
#define OS_ERROR     (UINT32)(-1)
#define OS_INVALID   (UINT32)(-1)
#define OS_64BIT_MAX (0xFFFFFFFFFFFFFFFFULL)

#define asm __asm
#ifdef typeof
#undef typeof
#endif
#define typeof __typeof__

#define SIZE(a) (a)

#define LOS_ASSERT_COND(expression)

/**
 * @ingroup los_base
 * Round the base address addr up to the next alignment boundary,
 * with boundary bytes as the smallest unit of alignment.
 */
#ifndef ALIGN
#define ALIGN(addr, boundary) LOS_Align(addr, boundary)
#endif
/**
 * @ingroup los_base
 * Round the base address addr down to a multiple of size, with size bytes as the smallest unit of alignment.
 */
#define TRUNCATE(addr, size) ((addr) & ~((size) - 1))
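
/*
 * Example: TRUNCATE(0x1007, 4) yields 0x1004; size must be a power of two
 * for the mask to round the address down correctly.
 */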

/**
 * @ingroup los_base
 * @brief Align the value (addr) to the boundary (boundary) you specify.
 *
 * @par Description:
 * This API is used to round the value (addr) up to a multiple of the alignment (boundary).
 *
 * @attention
 * <ul>
 * <li>The value of boundary is usually 4, 8, 16, or 32, and must be a power of two.</li>
 * </ul>
 *
 * @param addr     [IN] The value to be aligned.
 * @param boundary [IN] The alignment boundary, in bytes.
 *
 * @retval #UINT32 The aligned value.
 * @par Dependency:
 * <ul><li>los_base.h: the header file that contains the API declaration.</li></ul>
 * @see
 */
static inline UINT32 LOS_Align(UINT32 addr, UINT32 boundary)
{
    /* the conditional guards against wrap-around when addr is near the UINT32 maximum */
    return (addr + (((addr + (boundary - 1)) > addr) ? (boundary - 1) : 0)) & ~(boundary - 1);
}
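
/*
 * Example: LOS_Align(0x1001, 4) returns 0x1004, while LOS_Align(0x1000, 4)
 * returns 0x1000 unchanged; the ALIGN() macro above expands to this function.
 */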

#define OS_GOTO_ERREND() \
    do {                 \
        goto LOS_ERREND; \
    } while (0)
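
/*
 * A usage sketch: OS_GOTO_ERREND() expects the enclosing function to provide
 * a LOS_ERREND label as its error exit. Example() and SomeCheckFails() below
 * are hypothetical names, for illustration only:
 *
 *   UINT32 Example(VOID)
 *   {
 *       if (SomeCheckFails()) {
 *           OS_GOTO_ERREND();
 *       }
 *       return LOS_OK;
 *   LOS_ERREND:
 *       return LOS_NOK;
 *   }
 */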

#ifndef UNUSED
#define UNUSED(X) (void)X
#endif
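
/*
 * Example: UNUSED(param); marks an intentionally unused parameter or
 * variable so the compiler does not warn about it.
 */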

#if defined(__GNUC__)
#ifndef __XTENSA_LX6__
static inline void maybe_release_fence(int model)
{
    switch (model) {
        case __ATOMIC_RELEASE:
            __atomic_thread_fence(__ATOMIC_RELEASE);
            break;
        case __ATOMIC_ACQ_REL:
            __atomic_thread_fence(__ATOMIC_ACQ_REL);
            break;
        case __ATOMIC_SEQ_CST:
            __atomic_thread_fence(__ATOMIC_SEQ_CST);
            break;
        default:
            break;
    }
}

static inline void maybe_acquire_fence(int model)
{
    switch (model) {
        case __ATOMIC_ACQUIRE:
            __atomic_thread_fence(__ATOMIC_ACQUIRE);
            break;
        case __ATOMIC_ACQ_REL:
            __atomic_thread_fence(__ATOMIC_ACQ_REL);
            break;
        case __ATOMIC_SEQ_CST:
            __atomic_thread_fence(__ATOMIC_SEQ_CST);
            break;
        default:
            break;
    }
}

#define __LIBATOMIC_N_LOCKS (1 << 4) /* 16 address-hashed lock flags */
static inline BOOL *__libatomic_flag_for_address(void *addr)
{
    static BOOL flag_table[__LIBATOMIC_N_LOCKS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    UINTPTR p = (UINTPTR)(UINTPTR *)addr;
    p += (p >> 2) + (p << 4);   /* mix the address bits (shifts 2 and 4) */
    p += (p >> 7) + (p << 5);   /* mix the address bits (shifts 7 and 5) */
    p += (p >> 17) + (p << 13); /* mix the address bits (shifts 17 and 13) */

    if (sizeof(void *) > 4) { /* 4: pointer size on a 32-bit system */
        p += (p >> 31); /* fold the high bits into the hash */
    }

    p &= (__LIBATOMIC_N_LOCKS - 1);
    return flag_table + p;
}

static inline void get_lock(void *addr, int model)
{
    BOOL *lock_ptr = __libatomic_flag_for_address(addr);

    maybe_release_fence(model);
    while (__atomic_test_and_set(lock_ptr, __ATOMIC_ACQUIRE) == 1) {
        ;
    }
}

static inline void free_lock(void *addr, int model)
{
    BOOL *lock_ptr = __libatomic_flag_for_address(addr);

    __atomic_clear(lock_ptr, __ATOMIC_RELEASE);
    maybe_acquire_fence(model);
}

static inline UINT64 __atomic_load_8(const volatile void *mem, int model)
{
    UINT64 ret;

    void *memP = (void *)mem;
    get_lock(memP, model);
    ret = *(UINT64 *)mem;
    free_lock(memP, model);
    return ret;
}

static inline void __atomic_store_8(volatile void *mem, UINT64 val, int model)
{
    void *memP = (void *)mem;
    get_lock(memP, model);
    *(UINT64 *)mem = val;
    free_lock(memP, model);
}

static inline UINT64 __atomic_exchange_8(volatile void *mem, UINT64 val, int model)
{
    UINT64 ret;

    void *memP = (void *)mem;
    get_lock(memP, model);
    ret = *(UINT64 *)mem;
    *(UINT64 *)mem = val;
    free_lock(memP, model);
    return ret;
}
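
/*
 * These helpers back GCC's libatomic calls for 8-byte objects on targets
 * without native 64-bit atomics: a __atomic_* builtin applied to a UINT64
 * (or Atomic64) object lowers to an out-of-line call such as __atomic_load_8,
 * which is serialized here by an address-hashed spinlock. A minimal usage
 * sketch, assuming a GNU toolchain that emits these libcalls:
 *
 *   Atomic64 counter = 0;
 *   UINT64 snap = __atomic_load_8(&counter, __ATOMIC_SEQ_CST);
 *   __atomic_store_8(&counter, snap + 1, __ATOMIC_SEQ_CST);
 */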
#endif /* __XTENSA_LX6__ */
#endif /* __GNUC__ */

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _LOS_COMPILER_H */