/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/opcodes.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)

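/*
 * The encoding class of an A64 instruction is selected by bits [28:25];
 * the table below is indexed by that 4-bit field. For example,
 * 0x91000000 ("add x0, x0, #0") has bits [28:25] == 0b1000 and decodes
 * to AARCH64_INSN_CLS_DP_IMM.
 */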
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
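/*
 * Kernel text (and module text when CONFIG_DEBUG_SET_MODULE_RONX is
 * enabled) may be mapped read-only, so patch_map() above sets up a
 * temporary writable alias through the FIX_TEXT_POKE0 fixmap slot;
 * patch_unmap() tears it down again.
 */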
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
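/*
 * Example (an illustrative sketch, not taken from a caller in this file):
 * overwrite one instruction with a NOP and synchronise the local
 * instruction cache:
 *
 *	u32 nop = aarch64_insn_gen_nop();
 *	int err = aarch64_insn_patch_text_nosync(addr, nop);
 */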

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with a "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * The ARMv8 architecture doesn't guarantee that all
			 * CPUs see the new instruction after returning from
			 * aarch64_insn_patch_text_nosync(), so send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
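/*
 * Example (an illustrative sketch; addr and target are assumed to be
 * word-aligned kernel text addresses): patch a single unconditional
 * branch, falling back to stop_machine() automatically when the old/new
 * instructions are not hotpatch-safe:
 *
 *	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)addr,
 *					       (unsigned long)target,
 *					       AARCH64_INSN_BRANCH_NOLINK);
 *	int err = aarch64_insn_patch_text(&addr, &insn, 1);
 */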

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
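/*
 * Worked example (illustrative): for B/BL the imm26 field occupies
 * bits [25:0], so this yields mask = 0x03ffffff and shift = 0, and
 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, 0x14000002)
 * below returns 2, i.e. a byte offset of 8 once scaled by 4.
 */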

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

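/*
 * ADR/ADRP split their 21-bit immediate across two fields: immlo is the
 * low ADR_IMM_HILOSPLIT bits, encoded at bit 29, and immhi holds the
 * remaining 19 bits, encoded at bit 5. The combined signed immediate
 * spans +/-1MB for ADR (and +/-4GB for ADRP, which scales it by 4KB).
 */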
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support a [-128M, 128M) offset; the arm64 virtual address
	 * arrangement guarantees that all kernel and module text is within
	 * +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
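/*
 * For example (illustrative): branching forward by 8 bytes yields
 * imm26 == 2 and the encoding 0x14000002, i.e. "b #8".
 */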

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
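/* The generated NOP is the canonical A64 encoding 0xd503201f. */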

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be a multiple of 4 in the range [-256, 252], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be a multiple of 8 in the range [-512, 504], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
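/*
 * For example (illustrative): a 64-bit MOVZ of 0x1234 into x0 with
 * shift == 16 sets the hw field to 1 and encodes as 0xd2a24680, i.e.
 * "movz x0, #0x1234, lsl #16".
 */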

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
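/*
 * The "(imm << A) >> B" pairs below sign-extend the decoded field and
 * multiply it by 4 in one go: the left shift moves the field's sign bit
 * into bit 31, and the arithmetic right shift leaves a net shift of
 * A - B == 2.
 */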
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from an instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

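/*
 * The PSTATE condition flags live in the top nibble of pstate: N is
 * bit 31, Z bit 30, C bit 29 and V bit 28. The shift tricks in the
 * helpers below exploit that layout, e.g. "pstate >> 1" lines Z up
 * with C, and "pstate << 3" lines V up with N.
 */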
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
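/*
 * Example use (an illustrative sketch): the table is indexed by the
 * 4-bit condition field of an AArch32 instruction (its top nibble) to
 * decide whether the instruction would execute under the saved pstate:
 *
 *	if (aarch32_opcode_cond_checks[insn >> 28](regs->pstate))
 *		... simulate the instruction ...
 */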