#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/* Per-processor data structure. %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per-CPU data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 IRQ nesting counter. Starts at -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;		/* top of the per-CPU IRQ stack */
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;	/* pending softirq mask */
	unsigned int __nmi_count;	/* number of NMIs on this CPU */
	short mmu_state;		/* lazy TLB flush state (TLBSTATE_*) */
	short isidle;			/* nonzero while the CPU is idle */
	struct mm_struct *active_mm;	/* mm currently loaded on this CPU */
	/* per-CPU interrupt statistics: */
	unsigned apic_timer_irqs;
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;

extern struct x8664_pda **_cpu_pda;
extern void pda_init(int);

#define cpu_pda(i) (_cpu_pda[i])

/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

#define pda_offset(field) offsetof(struct x8664_pda, field)

#define pda_to_op(op, field, val)					\
do {									\
	typedef typeof(_proxy_pda.field) T__;				\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	case 4:								\
		asm(op "l %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	case 8:								\
		asm(op "q %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
} while (0)

#define pda_from_op(op, field)						\
({									\
	typeof(_proxy_pda.field) ret__;					\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 4:								\
		asm(op "l %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 8:								\
		asm(op "q %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
	ret__;								\
})

#define read_pda(field)		pda_from_op("mov", field)
#define write_pda(field, val)	pda_to_op("mov", field, val)
#define add_pda(field, val)	pda_to_op("add", field, val)
#define sub_pda(field, val)	pda_to_op("sub", field, val)
#define or_pda(field, val)	pda_to_op("or", field, val)

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
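/*
 * A minimal usage sketch, illustrative only: example_pda_access() is a
 * hypothetical helper, not part of the kernel API.  Each accessor
 * expands to a single %gs-relative instruction, so each access is
 * atomic with respect to interrupts on this CPU; callers still need
 * preemption disabled so the task cannot migrate between two accesses.
 */
static inline unsigned int example_pda_access(void)
{
	add_pda(irq_call_count, 1);	/* one "addl $1,%gs:..." insn */
	write_pda(isidle, 0);		/* one "movw $0,%gs:..." insn */
	return read_pda(cpunumber);	/* one "movl %gs:...,%0" insn */
}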
#endif /* !__ASSEMBLY__ */

/* kernelstack is left 5 64-bit words below the actual top of the stack */
#define PDA_STACKOFFSET (5*8)

#endif /* _ASM_X86_PDA_H */
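/*
 * Assembly-side sketch (illustrative): PDA_STACKOFFSET sits outside the
 * __ASSEMBLY__ guard because entry code consumes it too.  Assuming an
 * asm-offsets-generated pda_kernelstack constant for the kernelstack
 * field, an entry path would switch stacks with something like:
 *
 *	movq	%gs:pda_kernelstack, %rsp
 */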