• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
#include <linux/spinlock.h> /* For preempt_disable() */
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/string.h> /* For memset() */
#include <asm/percpu.h>
8 
/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM 32768
#endif
13 
/*
 * Must be an lvalue: get_cpu_var() disables preemption (so the task
 * cannot migrate off this CPU) and then evaluates to this CPU's
 * instance of @var.  Every get_cpu_var() must be paired with a
 * put_cpu_var(), which re-enables preemption.
 */
#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
17 
#ifdef CONFIG_SMP
19 
20 struct percpu_data {
21 	void *ptrs[NR_CPUS];
22 };
23 
/*
 * Use this to get to a cpu's version of the per-cpu object allocated using
 * alloc_percpu.  Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 *
 * NOTE(review): the ~(unsigned long) below undoes a bitwise-complement
 * "disguise" of the struct percpu_data pointer — presumably
 * __alloc_percpu() returns the complemented pointer so that a direct
 * dereference of the cookie faults; confirm against the allocator.
 */
#define per_cpu_ptr(ptr, cpu)						\
({									\
	struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)];				\
})
34 
extern void *__alloc_percpu(size_t size);
extern void free_percpu(const void *);
37 
#else /* CONFIG_SMP */
39 
40 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
41 
__alloc_percpu(size_t size)42 static inline void *__alloc_percpu(size_t size)
43 {
44 	void *ret = kmalloc(size, GFP_KERNEL);
45 	if (ret)
46 		memset(ret, 0, size);
47 	return ret;
48 }
/* UP counterpart of __alloc_percpu(): the object is a plain kmalloc one. */
static inline void free_percpu(const void *ptr)
{
	kfree(ptr);
}
53 
#endif /* CONFIG_SMP */
55 
/*
 * Simple wrapper for the common case: allocates a zeroed per-cpu object
 * of @type and casts the opaque cookie back to (type *).  Pair with
 * free_percpu().
 */
#define alloc_percpu(type)	((type *)(__alloc_percpu(sizeof(type))))
58 
#endif /* __LINUX_PERCPU_H */
60