• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0 */
2 /*
3  * Copyright (C) 2018 HUAWEI, Inc.
4  *             http://www.huawei.com/
5  * Created by Li Guifu <bluce.liguifu@huawei.com>
6  * Modified by Gao Xiang <gaoxiang25@huawei.com>
7  */
8 #ifndef __EROFS_DEFS_H
9 #define __EROFS_DEFS_H
10 
11 #ifdef __cplusplus
12 extern "C"
13 {
14 #endif
15 
16 #include <stddef.h>
17 #include <stdint.h>
18 #include <assert.h>
19 #include <inttypes.h>
20 #include <limits.h>
21 #include <stdbool.h>
22 
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26 
27 #ifdef HAVE_LINUX_TYPES_H
28 #include <linux/types.h>
29 #endif
30 
/*
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 * Uses __typeof__ (matching __round_mask() below) rather than plain
 * `typeof', so the header also builds under strict ISO modes
 * (e.g. -std=c11) where `typeof' is not a keyword.
 */
#define container_of(ptr, type, member) ({			\
	const __typeof__(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })
40 
/* Kernel-style shorthand for the fixed-width unsigned integer types. */
typedef uint8_t         u8;
typedef uint16_t        u16;
typedef uint32_t        u32;
typedef uint64_t        u64;

#ifndef HAVE_LINUX_TYPES_H
/*
 * Fallbacks for the <linux/types.h> aliases.  The little-endian (__leNN)
 * and big-endian (__beNN) tags are documentation only at the C level;
 * all share the storage width of the corresponding plain type.
 */
typedef u8	__u8;
typedef u16	__u16;
typedef u32	__u32;
typedef u64	__u64;
typedef u16	__le16;
typedef u32	__le32;
typedef u64	__le64;
typedef u16	__be16;
typedef u32	__be32;
typedef u64	__be64;
#endif

/* Signed counterparts. */
typedef int8_t          s8;
typedef int16_t         s16;
typedef int32_t         s32;
typedef int64_t         s64;
63 
#if __BYTE_ORDER == __LITTLE_ENDIAN
/*
 * The host byte order is the same as the on-disk (little-endian) order,
 * so these conversions are all identity casts.
 *
 * NOTE(review): __BYTE_ORDER / __LITTLE_ENDIAN come from <endian.h> on
 * glibc.  If neither macro is defined, both sides evaluate to 0 and this
 * branch is silently selected -- verify the build environment always
 * provides them.
 */
#define cpu_to_le16(x) ((__u16)(x))
#define cpu_to_le32(x) ((__u32)(x))
#define cpu_to_le64(x) ((__u64)(x))
#define le16_to_cpu(x) ((__u16)(x))
#define le32_to_cpu(x) ((__u32)(x))
#define le64_to_cpu(x) ((__u64)(x))

#else
#if __BYTE_ORDER == __BIG_ENDIAN
/* Big-endian host: byte-swap to/from the little-endian disk format. */
#define cpu_to_le16(x) (__builtin_bswap16(x))
#define cpu_to_le32(x) (__builtin_bswap32(x))
#define cpu_to_le64(x) (__builtin_bswap64(x))
#define le16_to_cpu(x) (__builtin_bswap16(x))
#define le32_to_cpu(x) (__builtin_bswap32(x))
#define le64_to_cpu(x) (__builtin_bswap64(x))
#else
/*
 * `#pragma error' is NOT a diagnostic: unknown pragmas are ignored, so an
 * unsupported byte order used to compile silently and produce garbage.
 * Use a real preprocessor error instead.
 */
#error "unknown byte order"
#endif
#endif
88 
/*
 * Compile-time (or, failing that, run-time) sanity check on `condition'.
 * - C++: map straight onto static_assert.
 * - C without optimization: a negative array size forces a compile error.
 * - Otherwise: fall back to a run-time assert().
 */
#ifdef __cplusplus
#define BUILD_BUG_ON(condition) static_assert(!(condition))
#elif !defined(__OPTIMIZE__)
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
#else
#define BUILD_BUG_ON(condition) assert(!(condition))
#endif
96 
/* Ceiling division: DIV_ROUND_UP(7, 2) == 4.  Requires n + d - 1 not to overflow. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* round_up()/round_down() require y to be a power of two. */
#define __round_mask(x, y)      ((__typeof__(x))((y)-1))
#define round_up(x, y)          ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y)        ((x) & ~__round_mask(x, y))

#ifndef roundup
/*
 * roundup()/rounddown() work for any positive y, not just powers of two.
 * The `const' in roundup() prevents gcc-3.3 from calling __divdi3.
 * __typeof__ (not plain `typeof') keeps strict ISO modes working,
 * matching __round_mask() above.
 */
#define roundup(x, y) (					\
{							\
	const __typeof__(y) __y = y;			\
	(((x) + (__y - 1)) / __y) * __y;		\
}							\
)
#endif
#define rounddown(x, y) (				\
{							\
	__typeof__(x) __x = (x);			\
	__x - (__x % (y));				\
}							\
)
118 
/* Can easily conflict with C++'s std::min */
#ifndef __cplusplus
/*
 * Type-strict min()/max(): each argument is evaluated exactly once, and
 * the dummy pointer comparison makes the compiler warn when the two
 * arguments have different types.  __typeof__ instead of plain `typeof'
 * keeps strict ISO modes working (consistent with __round_mask()).
 */
#define min(x, y) ({				\
	__typeof__(x) _min1 = (x);		\
	__typeof__(y) _min2 = (y);		\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({				\
	__typeof__(x) _max1 = (x);		\
	__typeof__(y) _max2 = (y);		\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })
#endif
133 
/*
 * ..and if you can't take the strict types, you can specify one yourself.
 * Or don't use min/max at all, of course.
 */
#define min_t(type, x, y) ({			\
	type __min1 = (x);			\
	type __min2 = (y);			\
	__min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({			\
	type __max1 = (x);			\
	type __max2 = (y);			\
	__max1 > __max2 ? __max1 : __max2; })

/*
 * Three-way comparison: -1 if x < y, 0 if equal, +1 if x > y.
 * __typeof__ instead of plain `typeof' for strict ISO compatibility,
 * consistent with __round_mask() above.
 */
#define cmpsgn(x, y) ({		\
	__typeof__(x) _x = (x);	\
	__typeof__(y) _y = (y);	\
	(_x > _y) - (_x < _y); })
152 
/* Element count of a true array (NOT a pointer/decayed parameter). */
#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

/* Bit widths of long / long long, needed by the BIT_*() helpers below. */
#ifdef __SIZEOF_LONG__
#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
#else
#define BITS_PER_LONG __WORDSIZE
#endif

/*
 * BITS_PER_LONG_LONG was referenced by BIT_ULL_MASK()/BIT_ULL_WORD() but
 * never defined anywhere in this header, so any use of those macros was
 * an instant compile error.  Define it (guarded, in case a platform
 * header supplies one).
 */
#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG (__CHAR_BIT__ * __SIZEOF_LONG_LONG__)
#endif

#define BIT(nr)             (1UL << (nr))
#define BIT_ULL(nr)         (1ULL << (nr))
#define BIT_MASK(nr)        (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)        ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)    (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)    ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE       8
#define BITS_TO_LONGS(nr)   DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
169 
/* Hard assertion: fatal (via assert) whenever `cond' holds. */
#define BUG_ON(cond)        assert(!(cond))

#ifdef NDEBUG
/* Release builds: still evaluate the expression (side effects), but don't trap. */
#define DBG_BUGON(condition)	((void)(condition))
#else
/* Debug builds: promote to a fatal BUG_ON(). */
#define DBG_BUGON(condition)	BUG_ON(condition)
#endif

#ifndef __maybe_unused
/* Annotation to silence unused-variable/-function warnings. */
#define __maybe_unused      __attribute__((__unused__))
#endif
181 
get_unaligned_le32(const u8 * p)182 static inline u32 get_unaligned_le32(const u8 *p)
183 {
184 	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
185 }
186 
/**
 * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
 * @n - parameter
 *
 * constant-capable log of base 2 calculation
 * - this can be used to initialise global variables from constant data,
 *   hence the massive ternary operator construction
 * - ilog2(0) evaluates to 0 (mathematically undefined; callers must not
 *   rely on it)
 *
 * selects the appropriately-sized optimised version depending on sizeof(n)
 */
#define ilog2(n)			\
(					\
	(n) & (1ULL << 63) ? 63 :	\
	(n) & (1ULL << 62) ? 62 :	\
	(n) & (1ULL << 61) ? 61 :	\
	(n) & (1ULL << 60) ? 60 :	\
	(n) & (1ULL << 59) ? 59 :	\
	(n) & (1ULL << 58) ? 58 :	\
	(n) & (1ULL << 57) ? 57 :	\
	(n) & (1ULL << 56) ? 56 :	\
	(n) & (1ULL << 55) ? 55 :	\
	(n) & (1ULL << 54) ? 54 :	\
	(n) & (1ULL << 53) ? 53 :	\
	(n) & (1ULL << 52) ? 52 :	\
	(n) & (1ULL << 51) ? 51 :	\
	(n) & (1ULL << 50) ? 50 :	\
	(n) & (1ULL << 49) ? 49 :	\
	(n) & (1ULL << 48) ? 48 :	\
	(n) & (1ULL << 47) ? 47 :	\
	(n) & (1ULL << 46) ? 46 :	\
	(n) & (1ULL << 45) ? 45 :	\
	(n) & (1ULL << 44) ? 44 :	\
	(n) & (1ULL << 43) ? 43 :	\
	(n) & (1ULL << 42) ? 42 :	\
	(n) & (1ULL << 41) ? 41 :	\
	(n) & (1ULL << 40) ? 40 :	\
	(n) & (1ULL << 39) ? 39 :	\
	(n) & (1ULL << 38) ? 38 :	\
	(n) & (1ULL << 37) ? 37 :	\
	(n) & (1ULL << 36) ? 36 :	\
	(n) & (1ULL << 35) ? 35 :	\
	(n) & (1ULL << 34) ? 34 :	\
	(n) & (1ULL << 33) ? 33 :	\
	(n) & (1ULL << 32) ? 32 :	\
	(n) & (1ULL << 31) ? 31 :	\
	(n) & (1ULL << 30) ? 30 :	\
	(n) & (1ULL << 29) ? 29 :	\
	(n) & (1ULL << 28) ? 28 :	\
	(n) & (1ULL << 27) ? 27 :	\
	(n) & (1ULL << 26) ? 26 :	\
	(n) & (1ULL << 25) ? 25 :	\
	(n) & (1ULL << 24) ? 24 :	\
	(n) & (1ULL << 23) ? 23 :	\
	(n) & (1ULL << 22) ? 22 :	\
	(n) & (1ULL << 21) ? 21 :	\
	(n) & (1ULL << 20) ? 20 :	\
	(n) & (1ULL << 19) ? 19 :	\
	(n) & (1ULL << 18) ? 18 :	\
	(n) & (1ULL << 17) ? 17 :	\
	(n) & (1ULL << 16) ? 16 :	\
	(n) & (1ULL << 15) ? 15 :	\
	(n) & (1ULL << 14) ? 14 :	\
	(n) & (1ULL << 13) ? 13 :	\
	(n) & (1ULL << 12) ? 12 :	\
	(n) & (1ULL << 11) ? 11 :	\
	(n) & (1ULL << 10) ? 10 :	\
	(n) & (1ULL <<  9) ?  9 :	\
	(n) & (1ULL <<  8) ?  8 :	\
	(n) & (1ULL <<  7) ?  7 :	\
	(n) & (1ULL <<  6) ?  6 :	\
	(n) & (1ULL <<  5) ?  5 :	\
	(n) & (1ULL <<  4) ?  4 :	\
	(n) & (1ULL <<  3) ?  3 :	\
	(n) & (1ULL <<  2) ?  2 :	\
	(n) & (1ULL <<  1) ?  1 : 0	\
)
263 
/*
 * fls_long - find last (most significant) set bit in an unsigned long
 * @x: the word to search
 *
 * Returns the 1-based index of the highest set bit, or 0 when x == 0
 * (kernel fls() convention: fls_long(1) == 1, fls_long(0x80000000UL) == 32).
 *
 * Uses __builtin_clzl, the unsigned long variant.  The previous
 * __builtin_clz takes unsigned int, so on LP64 targets it truncated x
 * AND counted leading zeros of the wrong width: fls_long(1) came out as
 * 64 - 31 = 33 instead of 1, breaking __roundup_pow_of_two() below.
 */
static inline unsigned int fls_long(unsigned long x)
{
	return x ? sizeof(x) * 8 - __builtin_clzl(x) : 0;
}
268 
/**
 * __roundup_pow_of_two() - round up to nearest power of two
 * @n: value to round up
 *
 * Runtime helper behind roundup_pow_of_two().  For n == 0 the n - 1
 * wraps to ULONG_MAX and the shift count goes out of range, so the
 * result is undefined -- callers must pass n >= 1.
 */
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
	unsigned int highest = fls_long(n - 1);

	return 1UL << highest;
}
278 
/**
 * roundup_pow_of_two - round the given value up to nearest power of two
 * @n: parameter
 *
 * round the given value up to the nearest power of two
 * - the result is undefined when n == 0
 * - this can be used to initialise global variables from constant data
 *
 * Constant arguments go through the constant-capable ilog2() chain so
 * the whole expression folds at compile time; runtime values fall back
 * to __roundup_pow_of_two().
 */
#define roundup_pow_of_two(n)			\
(						\
	__builtin_constant_p(n) ? (		\
		((n) == 1) ? 1 :		\
		(1UL << (ilog2((n) - 1) + 1))	\
				   ) :		\
	__roundup_pow_of_two(n)			\
)
295 
#ifndef __always_inline
/* Degrade gracefully where the attribute-backed macro isn't provided. */
#define __always_inline	inline
#endif

/*
 * Nanosecond fields of struct stat timestamps, papering over platform
 * differences; 0 where no sub-second field is available.
 */
#ifdef HAVE_STRUCT_STAT_ST_ATIM
/* Linux: POSIX.1-2008 struct timespec members. */
#define ST_ATIM_NSEC(stbuf) ((stbuf)->st_atim.tv_nsec)
#define ST_CTIM_NSEC(stbuf) ((stbuf)->st_ctim.tv_nsec)
#define ST_MTIM_NSEC(stbuf) ((stbuf)->st_mtim.tv_nsec)
#elif defined(HAVE_STRUCT_STAT_ST_ATIMENSEC)
/* macOS: bare *nsec members. */
#define ST_ATIM_NSEC(stbuf) ((stbuf)->st_atimensec)
#define ST_CTIM_NSEC(stbuf) ((stbuf)->st_ctimensec)
#define ST_MTIM_NSEC(stbuf) ((stbuf)->st_mtimensec)
#else
/* No sub-second resolution on this platform. */
#define ST_ATIM_NSEC(stbuf) 0
#define ST_CTIM_NSEC(stbuf) 0
#define ST_MTIM_NSEC(stbuf) 0
#endif

#ifdef __APPLE__
/* macOS has no separate 64-bit stat entry points; plain stat is 64-bit. */
#define stat64		stat
#define lstat64		lstat
#endif
320 
321 #ifdef __cplusplus
322 }
323 #endif
324 
325 #endif
326