#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <asm/nospec-branch.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					   struct task_struct *next);
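
/*
 * Aside: the forward declaration above introduces an incomplete type,
 * which is all the prototype needs, since it only traffics in pointers.
 * A minimal illustration (not part of this header):
 *
 *	struct task_struct;			incomplete: no layout yet
 *	void poke(struct task_struct *t);	OK, pointers need no layout
 *	struct task_struct t;			error: storage needs a size
 */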

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							\
	"movl %P[task_canary](%[next]), %%ebx\n\t"			\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam						\
	, [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam						\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
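
/*
 * Illustration (simplified; %[next] binds to %edx via the "d"
 * constraint in switch_to() below, and %fs is the x86-32 per-cpu
 * segment): the two movl's above expand roughly to
 *
 *	movl offsetof(struct task_struct, stack_canary)(%edx), %ebx
 *	movl %ebx, %fs:stack_canary
 *
 * copying the incoming task's canary into the per-cpu slot that
 * -fstack-protector epilogues check, before any protected function
 * can return on the new stack.
 */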

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
#define __retpoline_fill_return_buffer					\
	ALTERNATIVE("jmp 910f",						\
		__stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
		X86_FEATURE_RSB_CTXSW)					\
	"910:\n\t"
#else
#define __retpoline_fill_return_buffer
#endif
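
/*
 * Mechanism sketch (see <asm/alternative.h>; simplified):
 * ALTERNATIVE(old, new, feature) emits "old" plus a record in
 * .altinstructions, and at boot apply_alternatives() patches "new"
 * over "old" on CPUs that announce "feature".  The default here is
 * therefore a single jump over an otherwise empty slot,
 *
 *	jmp 910f
 *	910:
 *
 * and only CPUs flagged X86_FEATURE_RSB_CTXSW execute the RSB fill.
 */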

/*
 * Saving eflags is important.  Not only does it switch IOPL between
 * tasks, it also protects other tasks from NT leaking in through
 * sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     __retpoline_fill_return_buffer			\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
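
/*
 * Usage sketch (simplified from context_switch() in kernel/sched/core.c):
 *
 *	switch_to(prev, next, prev);
 *
 * The third argument matters because when the outgoing task is
 * eventually switched back in, the task it originally handed over to
 * may be long gone; "last" receives __switch_to()'s return value in
 * EAX, i.e. the task that ran immediately before us on this CPU.
 */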

#else /* CONFIG_X86_32 */

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
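
/*
 * Layout sketch (simplified): after SAVE_CONTEXT and the "save RSP"
 * step in switch_to() below, the sleeping task's stack ends with
 *
 *	... frames of schedule() and its callers ...
 *	saved %rbp		<- prev->thread.sp
 *
 * get_wchan() dereferences thread.sp to pick up that frame pointer
 * and start unwinding, which is why %rbp must be pushed last.
 */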

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15", "flags"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam						  \
	, [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam						  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
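
/*
 * Note (simplified): on x86-64, GCC hard-codes the canary location as
 * %gs:40, so stack_canary must sit at offset 40 of irq_stack_union,
 * right after its 40-byte gs_base pad.  The movq pair above is
 * roughly
 *
 *	movq offsetof(struct task_struct, stack_canary)(%rsi), %r8
 *	movq %r8, %gs:<stack_canary slot>
 *
 * refreshing that fixed slot with the incoming task's canary.
 */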

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
#define __retpoline_fill_return_buffer					\
	ALTERNATIVE("jmp 910f",						\
		__stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
		X86_FEATURE_RSB_CTXSW)					\
	"910:\n\t"
#else
#define __retpoline_fill_return_buffer
#endif
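
/*
 * Fill-loop sketch (see __FILL_RETURN_BUFFER in <asm/nospec-branch.h>;
 * simplified): the sequence issues RSB_CLEAR_LOOPS worth of calls
 * purely for the return addresses they push into the RSB, and parks a
 * speculation trap at each return address:
 *
 *	call 2f
 *	1:	pause ; lfence ; jmp 1b		(sits at the return address)
 *	2:	...
 *
 * A speculative "ret" consuming a stuffed entry therefore spins
 * harmlessly.  %r12 is the loop counter and %rsp is adjusted at the
 * end to discard the real return addresses left by the calls.
 */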

/*
 * There is no need to save or restore flags, because flags are always
 * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
 * has no effect.
 */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     __retpoline_fill_return_buffer				  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz   ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
	       [current_task] "m" (current_task)			  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
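
/*
 * Flow sketch (simplified): a freshly forked child still has _TIF_FORK
 * set in its thread_info (copy_thread() arranged that), so its very
 * first switch-in takes the "jnz ret_from_fork" branch above, with the
 * previous task in %rdi (copied from %rax, __switch_to()'s return
 * value), instead of falling through RESTORE_CONTEXT into the middle
 * of a switch_to() it never executed.
 */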

#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_SWITCH_TO_H */