/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

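/*
 * A rip-relative insn was rewritten (see riprel_analyze()) to address its
 * memory operand through the scratch register named below; the register is
 * saved before and restored after the single-step.
 */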
#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define	UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

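/*
 * Expansion example (for illustration): W(0x10, b0..bf) evaluates to the 16
 * bits b0..bf shifted left by (0x10 % 32) = 16, so "W(0x00, ...) |
 * W(0x10, ...)" packs opcode rows 0x00 and 0x10 into a single u32.  Each
 * table below is therefore a 256-bit bitmap which
 * test_bit(OPCODE1(insn), table) indexes directly by opcode byte.
 */
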
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think that only *(unsigned long*) is used.
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
 *	(so why do we support bound (62)? it's similar, and similarly unused...)
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * 07,17,1f - pop es/ss/ds
 *	Normally not used in userspace, but would execute if used.
 *	Can cause GP or stack exception if they try to load a wrong segment
 *	descriptor.  We hesitate to run them under single step since the
 *	kernel's handling of userspace single-stepping (TF flag) is fragile.
 *	We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
 *	on the same grounds that they are never used.
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry.  (Other "int N" insns
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3.)
 *	Not supported since the kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/*
 * Good-instruction tables for 64-bit apps.
 *
 * Genuinely invalid opcodes:
 * 06,07 - formerly push/pop es
 * 0e - formerly push cs
 * 16,17 - formerly push/pop ss
 * 1e,1f - formerly push/pop ds
 * 27,2f,37,3f - formerly daa/das/aaa/aas
 * 60,61 - formerly pusha/popa
 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
 * 82 - formerly redundant encoding of Group1
 * 9a - formerly call seg:ofs
 * ce - formerly into
 * d4,d5 - formerly aam/aad
 * d6 - formerly undocumented salc
 * ea - formerly jmp seg:ofs
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * cd - int N.
 *	Used by userspace for "int 80" syscall entry.  (Other "int N" insns
 *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3.)
 *	Not supported since the kernel's handling of userspace single-stepping
 *	(TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
 */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/*
 * Using this for both 64-bit and 32-bit apps.
 * Opcodes we don't support:
 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
 *	Also encodes tons of other system insns if mod=11.
 *	Some are in fact non-system: xend, xtest, rdtscp, maybe more
 * 0f 05 - syscall
 * 0f 06 - clts (CPL0 insn)
 * 0f 07 - sysret
 * 0f 08 - invd (CPL0 insn)
 * 0f 09 - wbinvd (CPL0 insn)
 * 0f 0b - ud2
 * 0f 30 - wrmsr (CPL0 insn) (so why is rdmsr allowed? it's also a CPL0 insn)
 * 0f 34 - sysenter
 * 0f 35 - sysexit
 * 0f 37 - getsec
 * 0f 78 - vmread (Intel VMX. CPL0 insn)
 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
 *	Note: with prefixes, these two opcodes are
 *	extrq/insertq/AVX512 convert vector ops.
 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
 *	{rd,wr}{fs,gs}base,{s,l,m}fence.
 *	Why? They are all user-executable.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we may need to refine support for:
 *
 * 0f - 2-byte instructions: For many of these instructions, the validity
 * depends on the prefix and/or the reg field.  On such instructions, we
 * just consider the opcode combination valid if it corresponds to any
 * valid instruction.
 *
 * 8f - Group 1 - only reg = 0 is OK
 * c6-c7 - Group 11 - only reg = 0 is OK
 * d9-df - fpu insns with some illegal encodings
 * f2, f3 - repnz, repz prefixes.  These are also the first byte for
 * certain floating-point instructions, such as addsd.
 *
 * fe - Group 4 - only reg = 0 or 1 is OK
 * ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 * 0f - (floating-point?) prefetch instructions
 * 07, 17, 1f - pop es, pop ss, pop ds
 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 * 67 - addr16 prefix
 * ce - into
 * f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 *   in the different Groups and fpu instructions.
 */

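/*
 * Refuse es/cs/ds/ss segment-override and lock prefixes.  Illustrative
 * example: "2e eb 05" (cs: jmp rel8) is rejected here even though
 * opcode eb itself is listed in the good-insns tables.
 */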
static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):
		case INAT_MAKE_PREFIX(INAT_PFX_DS):
		case INAT_MAKE_PREFIX(INAT_PFX_SS):
		case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
			return true;
		}
	}
	return false;
}

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (!insn_complete(insn))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * defparam->fixups accordingly.  (The contents of the scratch register
 * will be saved before we single-step the modified instruction,
 * and restored afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 * - There's always a modrm byte with bit layout "00 reg 101".
 * - There's never a SIB byte.
 * - The displacement is always 4 bytes.
 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
 *   has no effect on rip-relative mode.  It doesn't make modrm byte
 *   with r/m=101 refer to register 1101 = R13.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode low numbered reg, not r8+.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for VEX3/EVEX prefix.
	 * TODO: add XOP treatment when insn decoder supports them
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * vex2:     c5    rvvvvLpp   (has no b bit)
		 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
		 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
		 * Setting VEX3.b (setting because it has inverted meaning).
		 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
		 * is the 4th bit of MODRM.rm, and needs the same treatment.
		 * For VEX3-encoded insns, VEX3.x value has no effect in
		 * non-SIB encoding, the change is superfluous but harmless.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative addressing
	 * via a scratch register.
	 *
	 * This is tricky since there are insns with modrm byte
	 * which also use registers not encoded in modrm byte:
	 * [i]div/[i]mul: implicitly use dx:ax
	 * shift ops: implicitly use cx
	 * cmpxchg: implicitly uses ax
	 * cmpxchg8/16b: implicitly uses dx:ax and bx:cx
	 *   Encoding: 0f c7/1 modrm
	 *   The code below thinks that reg=1 (cx), chooses si as scratch.
	 * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m.
	 *   First appeared in Haswell (BMI2 insn).  It is vex-encoded.
	 *   Example where none of bx,cx,dx can be used as scratch reg:
	 *   c4 e2 63 f6 0d disp32   mulx disp32(%rip),%ebx,%ecx
	 * [v]pcmpistri: implicitly uses cx, xmm0
	 * [v]pcmpistrm: implicitly uses xmm0
	 * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0
	 * [v]pcmpestrm: implicitly uses ax, dx, xmm0
	 *   Evil SSE4.2 string comparison ops from hell.
	 * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination.
	 *   Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
	 *   Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
	 *   AMD says it has no 3-operand form (vex.vvvv must be 1111)
	 *   and that it can have only register operands, not mem
	 *   (its modrm byte must have mode=11).
	 *   If these restrictions are ever lifted,
	 *   we'll need code to prevent selection of di as scratch reg!
	 *
	 * Summary: I don't know any insns with modrm byte which
	 * use SI register implicitly.  DI register is used only
	 * by one insn (maskmovq) and BX register is used
	 * only by one too (cmpxchg8b).
	 * BP is stack-segment based (may be a problem?).
	 * AX, DX, CX are off-limits (many implicit users).
	 * SP is unusable (it's stack pointer - think about "pop mem";
	 * also, rsp+disp32 needs sib encoding -> insn length change).
	 */

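	/*
	 * Worked example (illustrative): "48 8d 05 nn nn nn nn" is
	 * lea disp32(%rip),%rax.  modrm.reg is 0 (ax) and there is no vex
	 * prefix, so si is free and is chosen as scratch below: modrm 05
	 * becomes 86 and the copied insn executes as lea disp32(%rsi),%rax,
	 * with rsi pre-loaded by riprel_pre_xol() to the end address of the
	 * original insn (the value rip would have had).
	 */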
	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * TODO: add XOP vvvv reading.
	 *
	 * vex.vvvv field is in bits 6-3, bits are inverted.
	 * But in 32-bit mode, high-order bit may be ignored.
	 * Therefore, let's consider only 3 low-order bits.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 *
	 * Choose scratch reg.  Order is important: must not select bx
	 * if we can use si (cmpxchg8b case!)
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from "00 reg 101" to "10 reg reg2".  Example:
	 * 89 05 disp32  mov %eax,disp32(%rip) becomes
	 * 89 86 disp32  mov %eax,disp32(%rsi)
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}

static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(struct pt_regs *regs)
{
	/*
	 * Check registers for mode as in_xxx_syscall() does not apply here.
	 */
	return user_64bit_mode(regs) ? 8 : 4;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

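/*
 * Push @val onto the probed task's user stack, mimicking the stack side
 * effect of a "call" or "push" insn; shared by the emulate and post_xol
 * paths.  Returns -EFAULT if the user stack page cannot be written.
 */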
static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long(regs);

	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}

/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_reg).
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

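/*
 * Expansion example: with the DO() defined in check_jmp_cond() below,
 * COND(70, 71, XF(OF)) expands to
 *	case 0x70: return (XF(OF)) != 0;
 *	case 0x71: return (XF(OF)) == 0;
 * i.e. jo is taken when EFLAGS.OF is set and jno when it is clear.  In
 * is_cond_jmp_opcode() both cases simply "return true".
 */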
static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; arch_uprobe_post_xol() won't even be called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}

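/*
 * Emulate "push reg".  push.reg_offset, precomputed by
 * push_setup_xol_ops(), is the offset of the pushed register's saved
 * value within struct pt_regs.
 */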
static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line.  Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long(regs);
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:"; this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed.  So we need to ensure that the new ->ip cannot fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[].  We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
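		/* e.g. "0f 84" (je rel32): 0x84 - 0x10 == 0x74, je rel8 */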
		opc1 = OPCODE2(insn) - 0x10;
		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66
	 * prefix.  No one uses these insns; reject any branch insn with such
	 * a prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}

/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn), reg_offset = 0;

	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only support rex_prefix 0x41 (x64 only) */
#ifdef CONFIG_X86_64
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;

		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, r8);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, r9);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, r10);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, r11);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, r12);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, r13);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, r14);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, r15);
			break;
		}
#else
		return -ENOSYS;
#endif
	} else {
		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, ax);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, cx);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, dx);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, bx);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, sp);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, bp);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, si);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, di);
			break;
		}
	}

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;
	auprobe->ops = &push_xol_ops;
	return 0;
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through */
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr;
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR (== UINT_MAX == -1) set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF.  We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		/* fall through */
	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal.  Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(regs), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);

		force_sig(SIGSEGV, current);
	}

	return -1;
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}