/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)

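/*
 * The instruction encoding class is selected by bits [28:25] of the
 * instruction word; aarch64_get_insn_class() below uses that nibble to
 * index this table (cf. the top-level encoding table in the ARMv8 ARM).
 */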
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

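	/* The hint immediate (CRm:op2) occupies bits [11:5], i.e. 0xFE0. */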
	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

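/*
 * With CONFIG_DEBUG_RODATA (core kernel text) or
 * CONFIG_DEBUG_SET_MODULE_RONX (module text), text pages are mapped
 * read-only, so patching writes go through a temporary writable alias
 * created in the FIX_TEXT_POKE0 fixmap slot; patch_lock serialises use
 * of that single slot.
 */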
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
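
/*
 * Illustrative usage sketch (not part of the original source), where addr
 * is a word-aligned pointer to the instruction to patch:
 *
 *	u32 nop = aarch64_insn_gen_nop();
 *	int err = aarch64_insn_patch_text_nosync(addr, nop);
 *
 * Callers that cannot guarantee hotpatch safety should use
 * aarch64_insn_patch_text() instead, which falls back to stop_machine()-
 * based patching via aarch64_insn_patch_text_sync().
 */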

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

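/*
 * stop_machine() callback: the first CPU to arrive becomes the master and
 * performs the patching, then releases the other CPUs with a second
 * atomic_inc(); the others spin until cpu_count exceeds num_online_cpus()
 * and execute an ISB to discard any stale prefetched instructions.
 */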
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with a "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * The ARMv8 architecture doesn't guarantee that all
			 * CPUs see the new instruction after
			 * aarch64_insn_patch_text_nosync() returns, so send
			 * IPIs to all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}

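/*
 * Each immediate field is described by an unshifted bit mask plus the bit
 * position of the field within the instruction word; the encode/decode
 * helpers below apply them as ((imm & mask) << shift).
 */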
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

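/*
 * Worked example: ADR/ADRP split their 21-bit immediate in two, with
 * imm[1:0] (immlo) at bits [30:29] and imm[20:2] (immhi) at bits [23:5].
 * For instance, imm = 0x5 is encoded as immlo = 0x1, immhi = 0x1.
 */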
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

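/*
 * General-purpose register fields are always 5 bits wide; only their
 * position differs by operand role: Rd/Rt at bit 0, Rn at bit 5, Rt2/Ra
 * at bit 10 and Rm at bit 16.
 */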
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

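/* The load/store size field at bits [31:30] selects an 8/16/32/64-bit access. */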
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support a [-128M, 128M) offset; the arm64 virtual address
	 * arrangement guarantees that all kernel and module text is within
	 * +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
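
/*
 * Worked example (illustrative): a forward branch of 8 bytes gives
 * offset = 8 and imm26 = 8 >> 2 = 2, so "B #8" encodes as
 * 0x14000000 | 2 = 0x14000002, 0x14000000 being the B opcode value
 * returned by aarch64_insn_get_b_value().
 */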

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be a multiple of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be a multiple of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

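	/* shift/16 becomes the "hw" field at bits [22:21]. */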
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
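
/*
 * Note on the shift pairs above: the left shift places the sign bit of the
 * N-bit immediate at bit 31, and the arithmetic right shift then
 * sign-extends it while leaving the value scaled by 4 (instructions are
 * 4 bytes), e.g. for imm26: (imm << 6) >> 4 == sign_extend(imm) * 4.
 */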

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

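/*
 * A 32-bit ("wide") Thumb-2 instruction begins with a halfword whose top
 * five bits are 0b11101, 0b11110 or 0b11111, i.e. a value >= 0xe800.
 */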
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Helpers for extracting register numbers and coprocessor fields from
 * 32-bit ARM instructions.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}