/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_private.h
 *
 * Various defines for libcfs.
 *
 */

#ifndef __LIBCFS_PRIVATE_H__
#define __LIBCFS_PRIVATE_H__

#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif

/*
 * When this is on, the LASSERT macro includes a check for an assignment used
 * in place of an equality check, but does not use unlikely(). Turn this on
 * from time to time for test builds; it should not be enabled in production
 * releases.
 */
#define LASSERT_CHECKED (0)

#define LASSERTF(cond, fmt, ...)					\
do {									\
	if (unlikely(!(cond))) {					\
		LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);	\
		libcfs_debug_msg(&__msg_data,				\
				 "ASSERTION( %s ) failed: " fmt, #cond,	\
				 ## __VA_ARGS__);			\
		lbug_with_loc(&__msg_data);				\
	}								\
} while (0)

#define LASSERT(cond) LASSERTF(cond, "\n")
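/*
 * Illustrative usage (not part of this header; names below are hypothetical):
 * LASSERT() checks an invariant and calls lbug_with_loc() when it fails,
 * while LASSERTF() also logs a formatted message.
 *
 *	LASSERT(req != NULL);
 *	LASSERTF(req->rq_refcount > 0, "refcount: %d\n", req->rq_refcount);
 */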

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * This is for more expensive checks that should not be enabled all the
 * time. LINVRNT() has to be explicitly enabled by the
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof(!!(exp)))
#endif

#define KLASSERT(e) LASSERT(e)

void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);

#define LBUG()							  \
do {								    \
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL);	     \
	lbug_with_loc(&msgdata);					\
} while (0)

#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif

#define LIBCFS_ALLOC_PRE(size, mask)					    \
do {									    \
	LASSERT(!in_interrupt() ||					    \
		((size) <= LIBCFS_VMALLOC_SIZE &&			    \
		 !gfpflags_allow_blocking(mask)));			    \
} while (0)

#define LIBCFS_ALLOC_POST(ptr, size)					    \
do {									    \
	if (unlikely((ptr) == NULL)) {					    \
		CERROR("LNET: out of memory at %s:%d (tried to alloc '"	    \
		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size));  \
	} else {							    \
		memset((ptr), 0, (size));				    \
	}								    \
} while (0)

/**
 * allocate memory with GFP flags @mask
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)				    \
do {									    \
	LIBCFS_ALLOC_PRE((size), (mask));				    \
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				    \
		kmalloc((size), (mask)) : vmalloc(size);		    \
	LIBCFS_ALLOC_POST((ptr), (size));				    \
} while (0)

/**
 * default allocator
 */
#define LIBCFS_ALLOC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)

/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)

/**
 * allocate memory for a specified CPU partition
 *   \a cptab != NULL: \a cpt is a CPU partition id of \a cptab
 *   \a cptab == NULL: \a cpt is a HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)		    \
do {									    \
	LIBCFS_ALLOC_PRE((size), (mask));				    \
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				    \
		kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
		vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));	    \
	LIBCFS_ALLOC_POST((ptr), (size));				    \
} while (0)

/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size)				    \
	LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)

#define LIBCFS_FREE(ptr, size)					  \
do {								    \
	int s = (size);						 \
	if (unlikely((ptr) == NULL)) {				  \
		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "    \
		       "%s:%d\n", s, __FILE__, __LINE__);	       \
		break;						  \
	}							       \
	if (unlikely(s > LIBCFS_VMALLOC_SIZE))			  \
		vfree(ptr);				    \
	else							    \
		kfree(ptr);					  \
} while (0)
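/*
 * Illustrative allocation pattern (sketch; "struct my_desc" is hypothetical):
 * LIBCFS_ALLOC() zeroes the buffer on success and leaves the pointer NULL on
 * failure; LIBCFS_FREE() must be given the same size so it can choose
 * between kfree() and vfree().
 *
 *	struct my_desc *desc;
 *
 *	LIBCFS_ALLOC(desc, sizeof(*desc));
 *	if (desc == NULL)
 *		return -ENOMEM;
 *	...
 *	LIBCFS_FREE(desc, sizeof(*desc));
 */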

/******************************************************************************/

/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif

void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

void libcfs_debug_set_level(unsigned int debug_level);

/*
 * Allocate per-cpu-partition data; the returned value is an array of
 * pointers that can be indexed by CPU ID.
 *	cptab != NULL: the size of the array is the number of CPU partitions
 *	cptab == NULL: the size of the array is the number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy a per-cpu-partition variable
 */
void  cfs_percpt_free(void *vars);
int   cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);

#define cfs_percpt_for_each(var, i, vars)		\
	for (i = 0; i < cfs_percpt_number(vars) &&	\
		    ((var) = (vars)[i]) != NULL; i++)
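/*
 * Illustrative per-partition usage (sketch; struct/variable names are
 * hypothetical): cfs_percpt_alloc() returns one element per CPU partition of
 * \a cptab, and cfs_percpt_for_each() walks that array.
 *
 *	struct my_stats *stats;
 *	struct my_stats **all_stats;
 *	int i;
 *
 *	all_stats = cfs_percpt_alloc(cptab, sizeof(*stats));
 *	if (all_stats == NULL)
 *		return -ENOMEM;
 *	cfs_percpt_for_each(stats, i, all_stats)
 *		stats->st_events = 0;
 *	cfs_percpt_free(all_stats);
 */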

/*
 * Allocate a variable array; the returned value is an array of pointers.
 * The caller specifies the length of the array with \a count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void  cfs_array_free(void *vars);

#define LASSERT_ATOMIC_ENABLED	  (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)				 \
do {							    \
	LASSERTF(atomic_read(a) == v,		       \
		 "value: %d\n", atomic_read((a)));	  \
} while (0)

/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)				 \
do {							    \
	LASSERTF(atomic_read(a) != v,		       \
		 "value: %d\n", atomic_read((a)));	  \
} while (0)

/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)				 \
do {							    \
	LASSERTF(atomic_read(a) < v,			\
		 "value: %d\n", atomic_read((a)));	  \
} while (0)

/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)				 \
do {							    \
	LASSERTF(atomic_read(a) <= v,		       \
		 "value: %d\n", atomic_read((a)));	  \
} while (0)

/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)				 \
do {							    \
	LASSERTF(atomic_read(a) > v,			\
		 "value: %d\n", atomic_read((a)));	  \
} while (0)

/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)				 \
do {							    \
	LASSERTF(atomic_read(a) >= v,		       \
		 "value: %d\n", atomic_read((a)));	  \
} while (0)

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)			 \
do {							    \
	int __v = atomic_read(a);			   \
	LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);     \
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)			 \
do {							    \
	int __v = atomic_read(a);			   \
	LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);    \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)			 \
do {							    \
	int __v = atomic_read(a);			   \
	LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);    \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)			 \
do {							    \
	int __v = atomic_read(a);			   \
	LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);   \
} while (0)

#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v)		 do {} while (0)
#define LASSERT_ATOMIC_NE(a, v)		 do {} while (0)
#define LASSERT_ATOMIC_LT(a, v)		 do {} while (0)
#define LASSERT_ATOMIC_LE(a, v)		 do {} while (0)
#define LASSERT_ATOMIC_GT(a, v)		 do {} while (0)
#define LASSERT_ATOMIC_GE(a, v)		 do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)	 do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)	 do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)	 do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)	 do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_ZERO(a)		  LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)		   LASSERT_ATOMIC_GT(a, 0)
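/*
 * Illustrative usage (sketch; the field name is hypothetical): each helper
 * asserts on the current value of an atomic_t and reports that value when
 * the assertion fails.
 *
 *	LASSERT_ATOMIC_POS(&conn->c_refcount);
 *	LASSERT_ATOMIC_GT_LT(&conn->c_refcount, 0, 1024);
 */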

#define CFS_ALLOC_PTR(ptr)      LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr)       LIBCFS_FREE(ptr, sizeof(*(ptr)))

/*
 * percpu partition lock
 *
 * There are use-cases like this in Lustre:
 * . each CPU partition has its own private data, which is frequently
 *   changed, and mostly by the local CPU partition.
 * . all CPU partitions share some global data, which is rarely changed.
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . a change to private data only needs to take the private lock
 * . a read of shared data only needs to take _any_ of the private locks
 * . a change to shared data needs to take _all_ private locks,
 *   which is slow and should be really rare.
 */

enum {
	CFS_PERCPT_LOCK_EX	= -1, /* negative */
};

struct cfs_percpt_lock {
	/* cpu-partition-table for this lock */
	struct cfs_cpt_table	*pcl_cptab;
	/* exclusively locked */
	unsigned int		pcl_locked;
	/* private lock table */
	spinlock_t		**pcl_locks;
};

/* return number of private locks */
static inline int
cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
{
	return cfs_cpt_number(pcl->pcl_cptab);
}

/*
 * create a cpu-partition lock based on CPU partition table \a cptab,
 * with one private lock per CPU partition
 */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);

/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
/* create percpt (atomic) refcount based on @cptab */
atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy percpt refcount */
void cfs_percpt_atomic_free(atomic_t **refs);
/* return sum of all percpu refs */
int cfs_percpt_atomic_summary(atomic_t **refs);
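/*
 * Illustrative locking pattern (sketch; "pcl" and "cpt" are placeholders):
 * take one private lock to update per-partition data, or pass
 * CFS_PERCPT_LOCK_EX to take every private lock before changing shared data.
 *
 *	cfs_percpt_lock(pcl, cpt);
 *	... update data private to partition "cpt" ...
 *	cfs_percpt_unlock(pcl, cpt);
 *
 *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
 *	... update data shared by all partitions ...
 *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */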

/** Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold.  \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *       6.8.4.2  The switch statement
 *       ....
 *       [#3] The expression of each case label shall be  an  integer
 *       constant   expression  and  no  two  of  the  case  constant
 *       expressions in the same switch statement shall have the same
 *       value  after  conversion...
 *
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)
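/*
 * Illustrative usage (sketch): CLASSERT() compiles only when its argument is
 * a non-zero integer constant expression; a false condition yields two case
 * labels with the value 0 and therefore a compile-time error.
 *
 *	CLASSERT(sizeof(__u64) == 8);
 *	CLASSERT(MY_WIRE_HDR_SIZE == 24);	(MY_WIRE_HDR_SIZE is hypothetical)
 */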
382 
383 /* max value for numeric network address */
384 #define MAX_NUMERIC_VALUE 0xffffffff
385 
386 /* implication */
387 #define ergo(a, b) (!(a) || (b))
388 /* logical equivalence */
389 #define equi(a, b) (!!(a) == !!(b))
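/*
 * Illustrative usage (sketch; the fields are hypothetical): ergo() and
 * equi() read naturally inside assertions, e.g. "granted implies owned":
 *
 *	LASSERT(ergo(lock->l_granted, lock->l_owner != 0));
 *	LINVRNT(equi(obj->o_cached, obj->o_page != NULL));
 */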

/* --------------------------------------------------------------------
 * Light-weight trace
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

struct libcfs_device_userstate {
	int	   ldu_memhog_pages;
	struct page   *ldu_memhog_root_page;
};

#define MKSTR(ptr) ((ptr) ? (ptr) : "")

static inline int cfs_size_round4(int val)
{
	return (val + 3) & (~0x3);
}

#ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val)
{
	return (val + 7) & (~0x7);
}

#define HAVE_CFS_SIZE_ROUND
#endif

static inline int cfs_size_round16(int val)
{
	return (val + 0xf) & (~0xf);
}

static inline int cfs_size_round32(int val)
{
	return (val + 0x1f) & (~0x1f);
}

static inline int cfs_size_round0(int val)
{
	if (!val)
		return 0;
	return (val + 1 + 7) & (~0x7);
}

static inline size_t cfs_round_strlen(char *fset)
{
	return (size_t)cfs_size_round((int)strlen(fset) + 1);
}
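/*
 * Illustrative values: cfs_size_round() rounds up to a multiple of 8, and
 * cfs_size_round0() reserves one extra byte before rounding (except for 0):
 *
 *	cfs_size_round(1)	== 8
 *	cfs_size_round(16)	== 16
 *	cfs_size_round0(0)	== 0
 *	cfs_size_round0(1)	== 8
 *	cfs_round_strlen("abc")	== 8	(strlen + 1 for the NUL, rounded)
 */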

#define LOGL(var, len, ptr)				       \
do {							    \
	if (var)						\
		memcpy((char *)ptr, (const char *)var, len);    \
	ptr += cfs_size_round(len);			     \
} while (0)

#define LOGU(var, len, ptr)				       \
do {							    \
	if (var)						\
		memcpy((char *)var, (const char *)ptr, len);    \
	ptr += cfs_size_round(len);			     \
} while (0)

#define LOGL0(var, len, ptr)			      \
do {						    \
	if (!len)				       \
		break;				  \
	memcpy((char *)ptr, (const char *)var, len);    \
	*((char *)(ptr) + len) = 0;		     \
	ptr += cfs_size_round(len + 1);		 \
} while (0)
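/*
 * Illustrative packing sketch (buffer and length names are hypothetical):
 * LOGL() copies @len bytes into the buffer and advances the cursor to the
 * next 8-byte-rounded offset; LOGU() does the reverse when unpacking, and
 * LOGL0() additionally NUL-terminates the copied string.
 *
 *	char *p = buf;
 *
 *	LOGL(peer_name, name_len, p);
 *	LOGL0(fs_name, fsname_len, p);
 */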

#endif /* __LIBCFS_PRIVATE_H__ */