#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

extern unsigned long arch_align_stack(unsigned long sp);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() 	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
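
/*
 * Illustrative sketch of the "talking to devices" case mentioned above:
 * when handing a freshly written DMA descriptor to a device, the
 * descriptor stores must become visible before the doorbell write that
 * tells the device to look at it.  The names "desc", "buf_dma",
 * "buf_len" and "ring_doorbell()" are hypothetical and used only for
 * illustration:
 *
 * <programlisting>
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	wmb();
 *	ring_doorbell(dev);
 * </programlisting>
 *
 * A full wmb() is needed here even on UP, because smp_wmb() may reduce
 * to a compiler barrier and does not order accesses against the device.
 */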

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() 	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
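
/*
 * Illustrative sketch of how the smp_*() variants above are meant to be
 * paired between CPUs (the variables "data" and "ready", both initially
 * zero, are hypothetical):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	data = 42;
 *	smp_wmb();			while (!ready)
 *	ready = 1;				cpu_relax();
 *					smp_rmb();
 *					BUG_ON(data != 42);
 * </programlisting>
 *
 * On SMP kernels these expand to the barriers defined above, or to
 * barrier() where x86's ordering rules already provide the guarantee;
 * on UP kernels they only need to stop compiler reordering.
 */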

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use an alternative three way for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
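
/*
 * Illustrative sketch of the intended use: bracketing a TSC read with
 * rdtsc_barrier() keeps the read from being speculated into or out of
 * the code region being measured ("cycles" is a hypothetical local):
 *
 * <programlisting>
 *	rdtsc_barrier();
 *	cycles = get_cycles();
 *	rdtsc_barrier();
 * </programlisting>
 */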

#endif