#ifndef __ARCH_I386_PERCPU__
#define __ARCH_I386_PERCPU__

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)				\
	movl %fs:per_cpu__##this_cpu_off, reg;		\
	lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)	%fs:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)			\
	movl $per_cpu__##var, reg
#define PER_CPU_VAR(var)	per_cpu__##var
#endif	/* SMP */

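/*
 * Usage sketch (illustrative only; the surrounding assembly here is
 * hypothetical, though cpu_gdt_descr itself is a real per-cpu variable
 * in this tree):
 *
 *	PER_CPU(cpu_gdt_descr, %ebx)	# %ebx = &this CPU's cpu_gdt_descr
 *	lgdt (%ebx)			# load this CPU's GDT
 */
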
#else /* ...!ASSEMBLY */

/*
 * C-side accessors for per-cpu variables.
 *
 * per_cpu(var, cpu) evaluates to the copy of "var" belonging to the
 * given CPU; __get_cpu_var(var) evaluates to the local CPU's copy,
 * reached on SMP through the %fs-based this_cpu_off offset.
 */
#ifdef CONFIG_SMP
/* Same as the generic implementation except for optimized local access. */
#define __GENERIC_PER_CPU

/* Other cpus use this to find our section. */
extern unsigned long __per_cpu_offset[];

#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Separate out the type, so (int[3], foo) works. */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU(type, name) \
    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
    __attribute__((__section__(".data.percpu.shared_aligned"))) \
    __typeof__(type) per_cpu__##name				\
    ____cacheline_aligned_in_smp

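/*
 * Typical pairing (a sketch; "my_counter" is a hypothetical name, not a
 * variable from this tree):
 *
 *	DECLARE_PER_CPU(unsigned long, my_counter);	declaration, in a header
 *	DEFINE_PER_CPU(unsigned long, my_counter);	definition, in one .c file
 *
 * Both expand to the mangled symbol per_cpu__my_counter; the definition
 * places it in the .data.percpu template section.
 */
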
/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

/* var is in the discarded region: offset to the particular copy we want */
#define per_cpu(var, cpu) (*({				\
	/* compiles only if "var" is a simple identifier */	\
	extern int simple_identifier_##var(void);	\
	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))

#define __raw_get_cpu_var(var) (*({					\
	extern int simple_identifier_##var(void);			\
	RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off));	\
}))

#define __get_cpu_var(var) __raw_get_cpu_var(var)

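/*
 * Usage sketch (reusing the hypothetical my_counter from above):
 *
 *	per_cpu(my_counter, 2) += 1;	CPU 2's copy, via __per_cpu_offset
 *	__get_cpu_var(my_counter)++;	the local CPU's copy
 *
 * Both are lvalues, unlike the single-insn x86_*_percpu() ops below.
 */
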
/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size)			\
do {								\
	unsigned int __i;					\
	for_each_possible_cpu(__i)				\
		memcpy((pcpudst)+__per_cpu_offset[__i],		\
		       (src), (size));				\
} while (0)

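/*
 * The module loader uses percpu_modcopy() to replicate a module's
 * .data.percpu template into every possible CPU's area; a call sketch
 * (paraphrased, not verbatim from kernel/module.c):
 *
 *	percpu_modcopy(mod_percpu_area, template_addr, template_size);
 */
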
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
#define __percpu_seg "%%fs:"
#else  /* !SMP */
#include <asm-generic/percpu.h>
#define __percpu_seg ""
#endif	/* SMP */

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op,var,val)				\
	do {							\
		typedef typeof(var) T__;			\
		/* type-check val against var, emitting no code */ \
		if (0) { T__ tmp__; tmp__ = (val); }		\
		switch (sizeof(var)) {				\
		case 1:						\
			asm(op "b %1,"__percpu_seg"%0"		\
			    : "+m" (var)			\
			    : "ri" ((T__)val));			\
			break;					\
		case 2:						\
			asm(op "w %1,"__percpu_seg"%0"		\
			    : "+m" (var)			\
			    : "ri" ((T__)val));			\
			break;					\
		case 4:						\
			asm(op "l %1,"__percpu_seg"%0"		\
			    : "+m" (var)			\
			    : "ri" ((T__)val));			\
			break;					\
		default: __bad_percpu_size();			\
		}						\
	} while (0)

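/*
 * Rough expansion sketch (illustrative; "per_cpu__foo" is hypothetical):
 * on SMP, percpu_to_op("add", per_cpu__foo, 1) on a 4-byte variable
 * compiles to a single
 *
 *	addl $1, %fs:per_cpu__foo
 *
 * and percpu_from_op("mov", per_cpu__foo) below reads it back with one
 * movl.
 */
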
#define percpu_from_op(op,var)					\
	({							\
		typeof(var) ret__;				\
		switch (sizeof(var)) {				\
		case 1:						\
			asm(op "b "__percpu_seg"%1,%0"		\
			    : "=r" (ret__)			\
			    : "m" (var));			\
			break;					\
		case 2:						\
			asm(op "w "__percpu_seg"%1,%0"		\
			    : "=r" (ret__)			\
			    : "m" (var));			\
			break;					\
		case 4:						\
			asm(op "l "__percpu_seg"%1,%0"		\
			    : "=r" (ret__)			\
			    : "m" (var));			\
			break;					\
		default: __bad_percpu_size();			\
		}						\
		ret__; })

#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
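
/*
 * Usage sketch (the variable "foo" is hypothetical; assumes SMP, where
 * __percpu_seg is "%fs:"):
 *
 *	unsigned long v = x86_read_percpu(foo);	movl %fs:per_cpu__foo, ...
 *	x86_write_percpu(foo, v + 1);		movl ..., %fs:per_cpu__foo
 *	x86_add_percpu(foo, 2);			addl $2, %fs:per_cpu__foo
 *
 * Each is a single instruction, but the result is not an lvalue.
 */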
#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_I386_PERCPU__ */