// SPDX-License-Identifier: GPL-2.0
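/*
 * Native instruction snippets used for boot-time patching of paravirt
 * call sites: when the kernel runs on bare metal, the indirect pv_ops
 * calls are replaced in place with the short native sequences defined
 * in this file.
 */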
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>

#define PSTART(d, m)	\
	patch_data_##d.m

#define PEND(d, m)	\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)	\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)	\
	case PARAVIRT_PATCH(ops.m):	\
		return PATCH(data, ops##_##m, insn_buff, len)
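
/*
 * For reference, PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len) expands to:
 *
 *	case PARAVIRT_PATCH(mmu.read_cr2):
 *		return paravirt_patch_insns(insn_buff, len,
 *					    patch_data_xxl.mmu_read_cr2,
 *					    patch_data_xxl.mmu_read_cr2 +
 *					    sizeof(patch_data_xxl.mmu_read_cr2));
 */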

#ifdef CONFIG_PARAVIRT_XXL
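/*
 * Raw opcode bytes of the native implementations; each array size is the
 * exact length of the replacement sequence.
 */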
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
	const unsigned char	mov64[3];
};

static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
};
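
/*
 * The "identity" patch: used for pv ops whose native form simply returns
 * its argument unchanged, so the indirect call collapses to
 * "mov %rdi, %rax".
 */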
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
	return PATCH(xxl, mov64, insn_buff, len);
}
# endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
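/*
 * Native replacements for the paravirt spinlock hooks: a native unlock is
 * a single byte store of zero to the lock word, and a native CPU is never
 * preempted by a hypervisor, so vcpu_is_preempted() collapses to
 * "return 0" (xor %eax, %eax).
 */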
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};

static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
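
/*
 * Copy the native replacement for the pv_ops member identified by @type
 * into @insn_buff (at most @len bytes) and return its length.  The
 * spinlock cases first check that the native implementation is still
 * installed; anything not handled here is deferred to
 * paravirt_patch_default().
 */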
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif
	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}