/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Li Guifu <bluce.liguifu@huawei.com>
 * Modified by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_DEFS_H
#define __EROFS_DEFS_H

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_LINUX_TYPES_H
#include <linux/types.h>
#endif

/*
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 */
#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })
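
/*
 * Usage sketch: given a pointer to an embedded member, container_of()
 * recovers the enclosing object.  "struct foo" and its members below are
 * purely hypothetical:
 *
 *	struct foo { int refcount; struct list_head list; };
 *
 *	struct foo *to_foo(struct list_head *node)
 *	{
 *		return container_of(node, struct foo, list);
 *	}
 */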

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#ifndef HAVE_LINUX_TYPES_H
typedef u8 __u8;
typedef u16 __u16;
typedef u32 __u32;
typedef u64 __u64;
typedef u16 __le16;
typedef u32 __le32;
typedef u64 __le64;
typedef u16 __be16;
typedef u32 __be32;
typedef u64 __be64;
#endif

typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;

#if __BYTE_ORDER == __LITTLE_ENDIAN
/*
 * The host byte order matches the little-endian on-disk byte order,
 * so these conversions are all just the identity.
 */
#define cpu_to_le16(x) ((__u16)(x))
#define cpu_to_le32(x) ((__u32)(x))
#define cpu_to_le64(x) ((__u64)(x))
#define le16_to_cpu(x) ((__u16)(x))
#define le32_to_cpu(x) ((__u32)(x))
#define le64_to_cpu(x) ((__u64)(x))

#else
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le16(x) (__builtin_bswap16(x))
#define cpu_to_le32(x) (__builtin_bswap32(x))
#define cpu_to_le64(x) (__builtin_bswap64(x))
#define le16_to_cpu(x) (__builtin_bswap16(x))
#define le32_to_cpu(x) (__builtin_bswap32(x))
#define le64_to_cpu(x) (__builtin_bswap64(x))
#else
#error "unknown byte order"
#endif
#endif
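
/*
 * Example: on-disk EROFS fields are little-endian, so values are converted
 * on the way out and back on the way in ("blkaddr" here is just a
 * hypothetical host-order value):
 *
 *	__le32 ondisk = cpu_to_le32(blkaddr);
 *	u32 host = le32_to_cpu(ondisk);
 *
 * On little-endian hosts both conversions compile to nothing.
 */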

#ifdef __cplusplus
#define BUILD_BUG_ON(condition) static_assert(!(condition))
#elif !defined(__OPTIMIZE__)
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
#else
#define BUILD_BUG_ON(condition) assert(!(condition))
#endif
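
/*
 * Example: BUILD_BUG_ON() turns a compile-time-known condition into a build
 * failure, e.g. guarding an on-disk layout assumption (the struct name and
 * size below are only illustrative):
 *
 *	BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
 */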

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define __round_mask(x, y)	((__typeof__(x))((y) - 1))
#define round_up(x, y)		((((x) - 1) | __round_mask(x, y)) + 1)
#define round_down(x, y)	((x) & ~__round_mask(x, y))

#ifndef roundup
/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
#define roundup(x, y) (					\
{							\
	const typeof(y) __y = y;			\
	(((x) + (__y - 1)) / __y) * __y;		\
}							\
)
#endif
#define rounddown(x, y) (				\
{							\
	typeof(x) __x = (x);				\
	__x - (__x % (y));				\
}							\
)
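
/*
 * Note on the rounding helpers: round_up()/round_down() rely on a bit mask
 * and therefore require a power-of-two alignment, while roundup()/rounddown()
 * divide and so accept any non-zero divisor, e.g.
 *
 *	round_up(10, 8)  -> 16		round_down(10, 8) -> 8
 *	roundup(10, 6)   -> 12		rounddown(10, 6)  -> 6
 */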

/* Can easily conflict with C++'s std::min */
#ifndef __cplusplus
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })
#endif

/*
 * ..and if you can't take the strict types, you can specify one yourself.
 * Or don't use min/max at all, of course.
 */
#define min_t(type, x, y) ({			\
	type __min1 = (x);			\
	type __min2 = (y);			\
	__min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({			\
	type __max1 = (x);			\
	type __max2 = (y);			\
	__max1 > __max2 ? __max1 : __max2; })

#define cmpsgn(x, y) ({				\
	typeof(x) _x = (x);			\
	typeof(y) _y = (y);			\
	(_x > _y) - (_x < _y); })
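
/*
 * Example: min()/max() insist on matching operand types (the pointer
 * comparison above warns otherwise); min_t()/max_t() cast both sides to the
 * given type instead.  With hypothetical variables pos, count (narrower) and
 * isize (u64):
 *
 *	u64 end = min_t(u64, pos + count, isize);
 *
 * cmpsgn() evaluates to -1, 0 or 1 and suits qsort()-style comparators.
 */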

#define ARRAY_SIZE(arr)		(sizeof(arr) / sizeof((arr)[0]))

#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))

#ifdef __SIZEOF_LONG__
#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
#else
#define BITS_PER_LONG __WORDSIZE
#endif
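
/*
 * Example: BIT_WORD()/BIT_MASK() locate bit `nr' inside an unsigned long
 * bitmap (the 256-bit map below is hypothetical):
 *
 *	unsigned long map[BITS_TO_LONGS(256)];
 *	bool is_set = map[BIT_WORD(nr)] & BIT_MASK(nr);
 */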

#define BUG_ON(cond)	assert(!(cond))

#ifdef NDEBUG
#define DBG_BUGON(condition)	((void)(condition))
#else
#define DBG_BUGON(condition)	BUG_ON(condition)
#endif

#ifndef __maybe_unused
#define __maybe_unused	__attribute__((__unused__))
#endif

static inline u32 get_unaligned_le32(const u8 *p)
{
	/* widen the top byte before shifting so the result stays unsigned */
	return p[0] | p[1] << 8 | p[2] << 16 | (u32)p[3] << 24;
}
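
/*
 * Example: get_unaligned_le32() reads a little-endian 32-bit value from a
 * possibly unaligned buffer without type-punning; the bytes
 * { 0x78, 0x56, 0x34, 0x12 } yield 0x12345678 regardless of host endianness.
 */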

/**
 * ilog2 - log base 2 of a 32-bit or 64-bit unsigned value
 * @n: parameter
 *
 * constant-capable log of base 2 calculation
 * - this can be used to initialise global variables from constant data, hence
 *   the massive ternary operator construction
 *
 * selects the appropriately-sized optimised version depending on sizeof(n)
 */
#define ilog2(n)			\
(					\
	(n) & (1ULL << 63) ? 63 :	\
	(n) & (1ULL << 62) ? 62 :	\
	(n) & (1ULL << 61) ? 61 :	\
	(n) & (1ULL << 60) ? 60 :	\
	(n) & (1ULL << 59) ? 59 :	\
	(n) & (1ULL << 58) ? 58 :	\
	(n) & (1ULL << 57) ? 57 :	\
	(n) & (1ULL << 56) ? 56 :	\
	(n) & (1ULL << 55) ? 55 :	\
	(n) & (1ULL << 54) ? 54 :	\
	(n) & (1ULL << 53) ? 53 :	\
	(n) & (1ULL << 52) ? 52 :	\
	(n) & (1ULL << 51) ? 51 :	\
	(n) & (1ULL << 50) ? 50 :	\
	(n) & (1ULL << 49) ? 49 :	\
	(n) & (1ULL << 48) ? 48 :	\
	(n) & (1ULL << 47) ? 47 :	\
	(n) & (1ULL << 46) ? 46 :	\
	(n) & (1ULL << 45) ? 45 :	\
	(n) & (1ULL << 44) ? 44 :	\
	(n) & (1ULL << 43) ? 43 :	\
	(n) & (1ULL << 42) ? 42 :	\
	(n) & (1ULL << 41) ? 41 :	\
	(n) & (1ULL << 40) ? 40 :	\
	(n) & (1ULL << 39) ? 39 :	\
	(n) & (1ULL << 38) ? 38 :	\
	(n) & (1ULL << 37) ? 37 :	\
	(n) & (1ULL << 36) ? 36 :	\
	(n) & (1ULL << 35) ? 35 :	\
	(n) & (1ULL << 34) ? 34 :	\
	(n) & (1ULL << 33) ? 33 :	\
	(n) & (1ULL << 32) ? 32 :	\
	(n) & (1ULL << 31) ? 31 :	\
	(n) & (1ULL << 30) ? 30 :	\
	(n) & (1ULL << 29) ? 29 :	\
	(n) & (1ULL << 28) ? 28 :	\
	(n) & (1ULL << 27) ? 27 :	\
	(n) & (1ULL << 26) ? 26 :	\
	(n) & (1ULL << 25) ? 25 :	\
	(n) & (1ULL << 24) ? 24 :	\
	(n) & (1ULL << 23) ? 23 :	\
	(n) & (1ULL << 22) ? 22 :	\
	(n) & (1ULL << 21) ? 21 :	\
	(n) & (1ULL << 20) ? 20 :	\
	(n) & (1ULL << 19) ? 19 :	\
	(n) & (1ULL << 18) ? 18 :	\
	(n) & (1ULL << 17) ? 17 :	\
	(n) & (1ULL << 16) ? 16 :	\
	(n) & (1ULL << 15) ? 15 :	\
	(n) & (1ULL << 14) ? 14 :	\
	(n) & (1ULL << 13) ? 13 :	\
	(n) & (1ULL << 12) ? 12 :	\
	(n) & (1ULL << 11) ? 11 :	\
	(n) & (1ULL << 10) ? 10 :	\
	(n) & (1ULL <<  9) ?  9 :	\
	(n) & (1ULL <<  8) ?  8 :	\
	(n) & (1ULL <<  7) ?  7 :	\
	(n) & (1ULL <<  6) ?  6 :	\
	(n) & (1ULL <<  5) ?  5 :	\
	(n) & (1ULL <<  4) ?  4 :	\
	(n) & (1ULL <<  3) ?  3 :	\
	(n) & (1ULL <<  2) ?  2 :	\
	(n) & (1ULL <<  1) ?  1 : 0	\
)
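
/*
 * Example: because ilog2() is a pure constant expression chain, it works in
 * compile-time contexts, e.g. ilog2(4096) == 12, so it can size arrays or
 * initialise globals from a block size known at build time.
 */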

static inline unsigned int fls_long(unsigned long x)
{
	/* __builtin_clzl operates on unsigned long, matching sizeof(x) * 8 */
	return x ? sizeof(x) * 8 - __builtin_clzl(x) : 0;
}

/**
 * __roundup_pow_of_two() - round up to nearest power of two
 * @n: value to round up
 */
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
	return 1UL << fls_long(n - 1);
}

/**
 * roundup_pow_of_two - round the given value up to nearest power of two
 * @n: parameter
 *
 * round the given value up to the nearest power of two
 * - the result is undefined when n == 0
 * - this can be used to initialise global variables from constant data
 */
#define roundup_pow_of_two(n)			\
(						\
	__builtin_constant_p(n) ? (		\
		((n) == 1) ? 1 :		\
		(1UL << (ilog2((n) - 1) + 1))	\
	) :					\
	__roundup_pow_of_two(n)			\
)
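
/*
 * Example: roundup_pow_of_two(3000) evaluates to 4096.  For a compile-time
 * constant the ilog2()-based branch is selected; otherwise fls_long() does
 * the work at run time.  As noted above, the result for n == 0 is undefined.
 */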

#ifndef __always_inline
#define __always_inline inline
#endif

#ifdef HAVE_STRUCT_STAT_ST_ATIM
/* Linux */
#define ST_ATIM_NSEC(stbuf) ((stbuf)->st_atim.tv_nsec)
#define ST_CTIM_NSEC(stbuf) ((stbuf)->st_ctim.tv_nsec)
#define ST_MTIM_NSEC(stbuf) ((stbuf)->st_mtim.tv_nsec)
#elif defined(HAVE_STRUCT_STAT_ST_ATIMENSEC)
/* macOS */
#define ST_ATIM_NSEC(stbuf) ((stbuf)->st_atimensec)
#define ST_CTIM_NSEC(stbuf) ((stbuf)->st_ctimensec)
#define ST_MTIM_NSEC(stbuf) ((stbuf)->st_mtimensec)
#else
#define ST_ATIM_NSEC(stbuf) 0
#define ST_CTIM_NSEC(stbuf) 0
#define ST_MTIM_NSEC(stbuf) 0
#endif
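
/*
 * Example: together with the second-granularity st_mtime field these macros
 * give portable timestamp precision (set_mtime() and path below are
 * hypothetical; on platforms without nanosecond fields they simply yield 0):
 *
 *	struct stat st;
 *
 *	if (!lstat(path, &st))
 *		set_mtime(st.st_mtime, ST_MTIM_NSEC(&st));
 */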

#ifdef __APPLE__
#define stat64 stat
#define lstat64 lstat
#endif

#ifdef __cplusplus
}
#endif

#endif