1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2012, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * libcfs/include/libcfs/libcfs_private.h
33 *
34 * Various defines for libcfs.
35 *
36 */
37
38 #ifndef __LIBCFS_PRIVATE_H__
39 #define __LIBCFS_PRIVATE_H__
40
/* Translation units that do not declare their own debug subsystem fall
 * into the catch-all S_UNDEFINED bucket for log filtering. */
#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif

/*
 * When this is on, LASSERT macro includes check for assignment used instead
 * of equality check, but doesn't have unlikely(). Turn this on from time to
 * time to make test-builds. This shouldn't be on for production release.
 */
#define LASSERT_CHECKED (0)
51
/*
 * LASSERTF(cond, fmt, ...): assert @cond; on failure, emit an emergency
 * (D_EMERG) message through the libcfs debug machinery — the stringified
 * condition plus the caller's printf-style @fmt/args — then crash via
 * lbug_with_loc(), which does not return.  unlikely() keeps the failure
 * path off the hot path.
 */
#define LASSERTF(cond, fmt, ...)					\
do {									\
	if (unlikely(!(cond))) {					\
		LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);	\
		libcfs_debug_msg(&__msg_data,				\
				 "ASSERTION( %s ) failed: " fmt, #cond,	\
				 ## __VA_ARGS__);			\
		lbug_with_loc(&__msg_data);				\
	}								\
} while (0)

/* Plain assertion: no message beyond the stringified condition. */
#define LASSERT(cond) LASSERTF(cond, "\n")
64
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * This is for more expensive checks that one doesn't want to be enabled all
 * the time. LINVRNT() has to be explicitly enabled by
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
/* Disabled form: sizeof keeps @exp syntax-checked but never evaluated,
 * so side effects in @exp cannot fire and no code is generated. */
# define LINVRNT(exp) ((void)sizeof !!(exp))
#endif

/* Kernel-space alias for LASSERT(). */
#define KLASSERT(e) LASSERT(e)
77
/* Report a fatal internal error at the location recorded in the message
 * data and halt; never returns. */
void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);

/* Unconditionally crash at the call site, logging file/line at D_EMERG. */
#define LBUG()								\
do {									\
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL);		\
	lbug_with_loc(&msgdata);					\
} while (0)
85
/* Requests above this size (2 pages) use vmalloc() instead of kmalloc(). */
#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
#endif

/*
 * Pre-allocation sanity check: in interrupt context only small
 * (<= LIBCFS_VMALLOC_SIZE) allocations with a non-blocking GFP @mask
 * are permitted, since larger/blocking allocations may sleep.
 */
#define LIBCFS_ALLOC_PRE(size, mask)					\
do {									\
	LASSERT(!in_interrupt() ||					\
		((size) <= LIBCFS_VMALLOC_SIZE &&			\
		 !gfpflags_allow_blocking(mask)));			\
} while (0)

/*
 * Post-allocation step: log an error if the allocation failed,
 * otherwise zero-fill the new buffer (LIBCFS_ALLOC* callers get
 * zeroed memory).
 */
#define LIBCFS_ALLOC_POST(ptr, size)					\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
	} else {							\
		memset((ptr), 0, (size));				\
	}								\
} while (0)
106
/**
 * allocate memory with GFP flags @mask
 *
 * Small requests (<= LIBCFS_VMALLOC_SIZE) go through kmalloc(); larger
 * ones through vmalloc().  NOTE(review): the vmalloc() path does not
 * pass @mask — presumably acceptable because LIBCFS_ALLOC_PRE() forbids
 * that path in atomic context, but confirm before relying on the flags
 * there.  On success @ptr is zero-filled; on failure @ptr is NULL and
 * an error is logged (see LIBCFS_ALLOC_POST()).
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)				\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc((size), (mask)) : vmalloc(size);		\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/**
 * default allocator (GFP_NOFS: may sleep, but no FS recursion)
 */
#define LIBCFS_ALLOC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)

/**
 * non-sleeping allocator (safe in atomic/interrupt context)
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
129
/**
 * allocate memory for specified CPU partition
 * \a cptab != NULL, \a cpt is CPU partition id of \a cptab
 * \a cptab == NULL, \a cpt is HW NUMA node id
 *
 * Same kmalloc/vmalloc split and zero-fill behaviour as
 * LIBCFS_ALLOC_GFP(), but NUMA-aware: memory is placed on the node
 * chosen by cfs_cpt_spread_node().
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)		\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
		vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));	\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size)				\
	LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
147
/*
 * Free memory obtained from LIBCFS_ALLOC*().  A NULL @ptr is treated as
 * a caller bug: it is logged and the free is skipped.  kvfree() handles
 * both the kmalloc() and vmalloc() cases, so callers need not remember
 * which path allocated the buffer; @size is used only for the message.
 */
#define LIBCFS_FREE(ptr, size)						\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "	\
		       "%s:%d\n", (int)(size), __FILE__, __LINE__);	\
		break;							\
	}								\
	kvfree(ptr);							\
} while (0)
157
158 /******************************************************************************/
159
/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
/* Unoptimized GCC builds: map the byte-order helpers directly to the
 * kernel __cpu_to_be*/__be*_to_cpu primitives. */
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif
171
/* User-space upcall helpers (implemented in the libcfs debug module). */
void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);

/* Debug-log lifecycle and control entry points. */
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

/*
 * allocate a variable array, returned value is an array of pointers.
 * Caller can specify length of array by count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);
186
/* Compile-time switch for the atomic-value assertion family below;
 * when 0, every LASSERT_ATOMIC_* compiles to nothing. */
#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of atomic @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)						\
do {									\
	LASSERTF(atomic_read(a) == v,					\
		 "value: %d\n", atomic_read((a)));			\
} while (0)

/** assert value of atomic @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)						\
do {									\
	LASSERTF(atomic_read(a) != v,					\
		 "value: %d\n", atomic_read((a)));			\
} while (0)

/** assert value of atomic @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)						\
do {									\
	LASSERTF(atomic_read(a) < v,					\
		 "value: %d\n", atomic_read((a)));			\
} while (0)

/** assert value of atomic @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)						\
do {									\
	LASSERTF(atomic_read(a) <= v,					\
		 "value: %d\n", atomic_read((a)));			\
} while (0)

/** assert value of atomic @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)						\
do {									\
	LASSERTF(atomic_read(a) > v,					\
		 "value: %d\n", atomic_read((a)));			\
} while (0)

/** assert value of atomic @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)						\
do {									\
	LASSERTF(atomic_read(a) >= v,					\
		 "value: %d\n", atomic_read((a)));			\
} while (0)

/* NB: the range checks below read the atomic once into __v so the two
 * comparisons and the message all see the same snapshot. */

/** assert value of atomic @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)					\
do {									\
	int __v = atomic_read(a);					\
	LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);		\
} while (0)

/** assert value of atomic @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)					\
do {									\
	int __v = atomic_read(a);					\
	LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);		\
} while (0)

/** assert value of atomic @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)					\
do {									\
	int __v = atomic_read(a);					\
	LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);		\
} while (0)

/** assert value of atomic @a is greater than or equal to @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)					\
do {									\
	int __v = atomic_read(a);					\
	LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);		\
} while (0)

#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
278
/* Typed convenience wrappers: allocate/free one object of *ptr's type. */
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))

/** Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold. \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 * 6.8.4.2 The switch statement
 * ....
 * [#3] The expression of each case label shall be an integer
 * constant expression and no two of the case constant
 * expressions in the same switch statement shall have the same
 * value after conversion...
 *
 * When \a cond is false, both case labels are 0 and the duplicate
 * label is rejected by the compiler.
 */
#define CLASSERT(cond) do {switch (42) {case (cond): case 0: break; } } while (0)

/* max value for numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* implication: a => b */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence: a <=> b */
#define equi(a, b) (!!(a) == !!(b))
305
/* --------------------------------------------------------------------
 * Light-weight trace
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

/*
 * Map a NULL string pointer to the empty string.
 *
 * The expansion is fully parenthesized.  The previous form,
 * `((ptr)) ? (ptr) : ""`, left the ternary exposed, so an operator
 * written after the macro invocation (e.g. `MKSTR(s) == t`) would bind
 * into the third operand instead of applying to the whole result.
 */
#define MKSTR(ptr) ((ptr) ? (ptr) : "")
312
/* Round @val up to the next multiple of 4. */
static inline size_t cfs_size_round4(int val)
{
	const int mask = 0x3;

	return (val + mask) & ~mask;
}
317
#ifndef HAVE_CFS_SIZE_ROUND
/* Round @val up to the next multiple of 8. */
static inline size_t cfs_size_round(int val)
{
	const int mask = 0x7;

	return (val + mask) & ~mask;
}

#define HAVE_CFS_SIZE_ROUND
#endif
326
/* Round @val up to the next multiple of 16. */
static inline size_t cfs_size_round16(int val)
{
	const int mask = 0xf;

	return (val + mask) & ~mask;
}
331
/* Round @val up to the next multiple of 32. */
static inline size_t cfs_size_round32(int val)
{
	const int mask = 0x1f;

	return (val + mask) & ~mask;
}
336
/* Like cfs_size_round() but reserves one extra byte (for a terminator)
 * before rounding up to 8; zero stays zero. */
static inline size_t cfs_size_round0(int val)
{
	const int mask = 0x7;

	if (val == 0)
		return 0;
	return (val + 1 + mask) & ~mask;
}
343
/* 8-byte-rounded buffer size needed to hold @fset including its NUL. */
static inline size_t cfs_round_strlen(char *fset)
{
	int nul_len = (int)strlen(fset) + 1;

	return cfs_size_round(nul_len);
}
348
/*
 * LOGL: serialize — copy @len bytes from @var (skipped if @var is NULL)
 * to the buffer cursor @ptr, then advance @ptr by the 8-byte-rounded
 * length so the next record stays aligned.
 */
#define LOGL(var, len, ptr)						\
do {									\
	if (var)							\
		memcpy((char *)ptr, (const char *)var, len);		\
	ptr += cfs_size_round(len);					\
} while (0)

/*
 * LOGU: deserialize — the inverse of LOGL: copy @len bytes from the
 * buffer cursor @ptr into @var (skipped if @var is NULL), then advance
 * @ptr by the 8-byte-rounded length.
 */
#define LOGU(var, len, ptr)						\
do {									\
	if (var)							\
		memcpy((char *)var, (const char *)ptr, len);		\
	ptr += cfs_size_round(len);					\
} while (0)
362
363 #endif
364