/*
 * kvm_host.h: used by the kvm module, and holds ia64-specific sections.
 *
 * Copyright (C) 2007, Intel Corporation.
 *
 * Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#ifndef __ASM_KVM_HOST_H
#define __ASM_KVM_HOST_H

#define KVM_USER_MEM_SLOTS 32

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* Define exit reasons from vmm to kvm */
#define EXIT_REASON_VM_PANIC            0
#define EXIT_REASON_MMIO_INSTRUCTION    1
#define EXIT_REASON_PAL_CALL            2
#define EXIT_REASON_SAL_CALL            3
#define EXIT_REASON_SWITCH_RR6          4
#define EXIT_REASON_VM_DESTROY          5
#define EXIT_REASON_EXTERNAL_INTERRUPT  6
#define EXIT_REASON_IPI                 7
#define EXIT_REASON_PTC_G               8
#define EXIT_REASON_DEBUG               20

/* Define vmm address space and vm data space. */
#define KVM_VMM_SIZE    (__IA64_UL_CONST(16) << 20)
#define KVM_VMM_SHIFT   24
#define KVM_VMM_BASE    0xD000000000000000
#define VMM_SIZE        (__IA64_UL_CONST(8) << 20)

/*
 * Define vm_buffer, used by PAL Services, base address.
 * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
 */
#define KVM_VM_BUFFER_BASE      (KVM_VMM_BASE + VMM_SIZE)
#define KVM_VM_BUFFER_SIZE      (__IA64_UL_CONST(8) << 20)

/*
 * kvm guest's data area looks as follows:
 *
 *            +----------------------+  -------     KVM_VM_DATA_SIZE
 *            |    vcpu[n]'s data    |  |     ___________________KVM_STK_OFFSET
 *            |                      |  |    /                   |
 *            |      ..........      |  |   /vcpu's struct&stack |
 *            |      ..........      |  |  /---------------------|---- 0
 *            |    vcpu[5]'s data    |  | /          vpd         |
 *            |    vcpu[4]'s data    |  |/-----------------------|
 *            |    vcpu[3]'s data    |  /          vtlb          |
 *            |    vcpu[2]'s data    | /|------------------------|
 *            |    vcpu[1]'s data    |/ |          vhpt          |
 *            |    vcpu[0]'s data    |___________________________|
 *            +----------------------+  |
 *            |   memory dirty log   |  |
 *            +----------------------+  |
 *            |   vm's data struct   |  |
 *            +----------------------+  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |    vm's p2m table    |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 * vm's data->|                      |  |
 *            +----------------------+  ------- 0
 * To support larger guest memory, the size of the p2m table needs to be
 * increased.  To support more vcpus, make sure there is enough space to
 * hold all of the vcpus' data.
 */

#define KVM_VM_DATA_SHIFT       26
#define KVM_VM_DATA_SIZE        (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
#define KVM_VM_DATA_BASE        (KVM_VMM_BASE + KVM_VM_DATA_SIZE)

#define KVM_P2M_BASE            KVM_VM_DATA_BASE
#define KVM_P2M_SIZE            (__IA64_UL_CONST(24) << 20)

#define VHPT_SHIFT              16
#define VHPT_SIZE               (__IA64_UL_CONST(1) << VHPT_SHIFT)
#define VHPT_NUM_ENTRIES        (__IA64_UL_CONST(1) << (VHPT_SHIFT-5))

#define VTLB_SHIFT              16
#define VTLB_SIZE               (__IA64_UL_CONST(1) << VTLB_SHIFT)
#define VTLB_NUM_ENTRIES        (1UL << (VTLB_SHIFT-5))

#define VPD_SHIFT               16
#define VPD_SIZE                (__IA64_UL_CONST(1) << VPD_SHIFT)

#define VCPU_STRUCT_SHIFT       16
#define VCPU_STRUCT_SIZE        (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)

/*
 * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} in arch/ia64/include/asm/kvm.h
 */
#define KVM_STK_SHIFT           16
#define KVM_STK_OFFSET          (__IA64_UL_CONST(1) << KVM_STK_SHIFT)

#define KVM_VM_STRUCT_SHIFT     19
#define KVM_VM_STRUCT_SIZE      (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)

#define KVM_MEM_DIRTY_LOG_SHIFT 19
#define KVM_MEM_DIRTY_LOG_SIZE  (__IA64_UL_CONST(1) << KVM_MEM_DIRTY_LOG_SHIFT)

#ifndef __ASSEMBLY__

/* Define the max vcpus and memory for Guests. */
#define KVM_MAX_VCPUS   ((KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE - \
                          KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data))
#define KVM_MAX_MEM_SIZE        (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
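
/*
 * How the two limits above follow from the layout: the p2m table uses one
 * 8-byte entry per guest page, so it can describe at most
 * (KVM_P2M_SIZE >> 3) pages, i.e. (KVM_P2M_SIZE >> 3) << PAGE_SHIFT bytes
 * of guest memory.  Whatever remains of the VM data area after the p2m
 * table, the vm struct and the dirty log is split into blocks of
 * sizeof(struct kvm_vcpu_data), which bounds the number of vcpus.
 */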

#define VMM_LOG_LEN 256

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>

struct kvm_vcpu_data {
        char vcpu_vhpt[VHPT_SIZE];
        char vcpu_vtlb[VTLB_SIZE];
        char vcpu_vpd[VPD_SIZE];
        char vcpu_struct[VCPU_STRUCT_SIZE];
};

struct kvm_vm_data {
        char kvm_p2m[KVM_P2M_SIZE];
        char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
        char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
        struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};

#define VCPU_BASE(n)    (KVM_VM_DATA_BASE + \
                         offsetof(struct kvm_vm_data, vcpu_data[n]))
#define KVM_VM_BASE     (KVM_VM_DATA_BASE + \
                         offsetof(struct kvm_vm_data, kvm_vm_struct))
#define KVM_MEM_DIRTY_LOG_BASE  (KVM_VM_DATA_BASE + \
                         offsetof(struct kvm_vm_data, kvm_mem_dirty_log))

#define VHPT_BASE(n)    (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
#define VTLB_BASE(n)    (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
#define VPD_BASE(n)     (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
#define VCPU_STRUCT_BASE(n)     (VCPU_BASE(n) + \
                         offsetof(struct kvm_vcpu_data, vcpu_struct))
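
/*
 * Usage note (informational): the macros above name fixed virtual addresses
 * inside the per-VM data area.  For example, VHPT_BASE(2) is the address of
 * vcpu 2's VHPT, i.e. KVM_VM_DATA_BASE plus the offset of
 * vcpu_data[2].vcpu_vhpt within struct kvm_vm_data.
 */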

/* IO section definitions */
#define IOREQ_READ      1
#define IOREQ_WRITE     0

#define STATE_IOREQ_NONE        0
#define STATE_IOREQ_READY       1
#define STATE_IOREQ_INPROCESS   2
#define STATE_IORESP_READY      3

/* Guest physical address layout. */
#define GPFN_MEM                (0UL << 60)     /* Guest pfn is normal mem */
#define GPFN_FRAME_BUFFER       (1UL << 60)     /* VGA framebuffer */
#define GPFN_LOW_MMIO           (2UL << 60)     /* Low MMIO range */
#define GPFN_PIB                (3UL << 60)     /* PIB base */
#define GPFN_IOSAPIC            (4UL << 60)     /* IOSAPIC base */
#define GPFN_LEGACY_IO          (5UL << 60)     /* Legacy I/O base */
#define GPFN_GFW                (6UL << 60)     /* Guest Firmware */
#define GPFN_PHYS_MMIO          (7UL << 60)     /* Directed MMIO Range */

#define GPFN_IO_MASK            (7UL << 60)     /* Guest pfn is I/O type */
#define GPFN_INV_MASK           (1UL << 63)     /* Guest pfn is invalid */
#define INVALID_MFN             (~0UL)
#define MEM_G                   (1UL << 30)
#define MEM_M                   (1UL << 20)
#define MMIO_START              (3 * MEM_G)
#define MMIO_SIZE               (512 * MEM_M)
#define VGA_IO_START            0xA0000UL
#define VGA_IO_SIZE             0x20000
#define LEGACY_IO_START         (MMIO_START + MMIO_SIZE)
#define LEGACY_IO_SIZE          (64 * MEM_M)
#define IO_SAPIC_START          0xfec00000UL
#define IO_SAPIC_SIZE           0x100000
#define PIB_START               0xfee00000UL
#define PIB_SIZE                0x200000
#define GFW_START               (4 * MEM_G - 16 * MEM_M)
#define GFW_SIZE                (16 * MEM_M)
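
/*
 * Illustrative sketch (not part of the original interface): the GPFN tag
 * values above occupy bits 60-62 of a guest pfn entry, so an entry can be
 * classified by masking with GPFN_IO_MASK and checked for validity with
 * GPFN_INV_MASK, e.g.:
 *
 *      is_io   = ((gpfn & GPFN_IO_MASK) != GPFN_MEM);
 *      invalid = ((gpfn & GPFN_INV_MASK) != 0);
 */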

/* Deliver mode, defined for ioapic.c */
#define dest_Fixed      IOSAPIC_FIXED
#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY

#define NMI_VECTOR                      2
#define ExtINT_VECTOR                   0
#define NULL_VECTOR                     (-1)
#define IA64_SPURIOUS_INT_VECTOR        0x0f

#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)

/*
 * Delivery mode
 */
#define SAPIC_DELIV_SHIFT       8
#define SAPIC_FIXED             0x0
#define SAPIC_LOWEST_PRIORITY   0x1
#define SAPIC_PMI               0x2
#define SAPIC_NMI               0x4
#define SAPIC_INIT              0x5
#define SAPIC_EXTINT            0x7

/*
 * vcpu->requests bit members for arch
 */
#define KVM_REQ_PTC_G           32
#define KVM_REQ_RESUME          33

struct kvm_mmio_req {
        uint64_t addr;          /* physical address */
        uint64_t size;          /* size in bytes */
        uint64_t data;          /* data (or paddr of data) */
        uint8_t state:4;
        uint8_t dir:1;          /* 1=read, 0=write */
};
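
/*
 * Minimal sketch of how an MMIO exit could be described with the type above.
 * Illustrative only: the helper below is hypothetical and not part of the
 * original interface.
 */
static inline void kvm_mmio_req_init(struct kvm_mmio_req *req,
                                     uint64_t addr, uint64_t size, int is_read)
{
        req->addr  = addr;                      /* guest physical address */
        req->size  = size;                      /* access size in bytes */
        req->data  = 0;                         /* filled in by the handler on a read */
        req->dir   = is_read ? IOREQ_READ : IOREQ_WRITE;
        req->state = STATE_IOREQ_READY;         /* request posted, awaiting service */
}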

/* Pal data struct */
struct kvm_pal_call {
        /* In area */
        uint64_t gr28;
        uint64_t gr29;
        uint64_t gr30;
        uint64_t gr31;
        /* Out area */
        struct ia64_pal_retval ret;
};

/* Sal data structure */
struct kvm_sal_call {
        /* In area */
        uint64_t in0;
        uint64_t in1;
        uint64_t in2;
        uint64_t in3;
        uint64_t in4;
        uint64_t in5;
        uint64_t in6;
        uint64_t in7;
        struct sal_ret_values ret;
};

/* Guest change rr6 */
struct kvm_switch_rr6 {
        uint64_t old_rr;
        uint64_t new_rr;
};

union ia64_ipi_a {
        unsigned long val;
        struct {
                unsigned long rv  : 3;
                unsigned long ir  : 1;
                unsigned long eid : 8;
                unsigned long id  : 8;
                unsigned long ib_base : 44;
        };
};

union ia64_ipi_d {
        unsigned long val;
        struct {
                unsigned long vector : 8;
                unsigned long dm     : 3;
                unsigned long ig     : 53;
        };
};

/* ipi check exit data */
struct kvm_ipi_data {
        union ia64_ipi_a addr;
        union ia64_ipi_d data;
};
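
/*
 * Illustrative sketch (hypothetical helpers, not part of the original
 * interface): pull the vector and delivery mode out of the data word of an
 * IPI described by struct kvm_ipi_data.
 */
static inline unsigned long kvm_ipi_vector(const struct kvm_ipi_data *p)
{
        return p->data.vector;          /* bits 0-7 of the written value */
}

static inline unsigned long kvm_ipi_deliver_mode(const struct kvm_ipi_data *p)
{
        return p->data.dm;              /* bits 8-10: SAPIC_FIXED, SAPIC_NMI, ... */
}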

/* global purge data */
struct kvm_ptc_g {
        unsigned long vaddr;
        unsigned long rr;
        unsigned long ps;
        struct kvm_vcpu *vcpu;
};

/* Exit control data */
struct exit_ctl_data {
        uint32_t exit_reason;
        uint32_t vm_status;
        union {
                struct kvm_mmio_req     ioreq;
                struct kvm_pal_call     pal_data;
                struct kvm_sal_call     sal_data;
                struct kvm_switch_rr6   rr_data;
                struct kvm_ipi_data     ipi_data;
                struct kvm_ptc_g        ptc_g_data;
        } u;
};
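
/*
 * Sketch of how an exit record is consumed (illustrative only; the switch
 * and handler names below are hypothetical, not the original dispatch code):
 * the vmm fills exit_reason and the matching member of the union, and the
 * host side selects the payload accordingly.
 *
 *      switch (p->exit_reason) {
 *      case EXIT_REASON_MMIO_INSTRUCTION:
 *              handle_mmio(&p->u.ioreq);
 *              break;
 *      case EXIT_REASON_PAL_CALL:
 *              handle_pal(&p->u.pal_data);
 *              break;
 *      ...
 *      }
 */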

union pte_flags {
        unsigned long val;
        struct {
                unsigned long p    : 1;         /* 0 */
                unsigned long      : 1;         /* 1 */
                unsigned long ma   : 3;         /* 2-4 */
                unsigned long a    : 1;         /* 5 */
                unsigned long d    : 1;         /* 6 */
                unsigned long pl   : 2;         /* 7-8 */
                unsigned long ar   : 3;         /* 9-11 */
                unsigned long ppn  : 38;        /* 12-49 */
                unsigned long      : 2;         /* 50-51 */
                unsigned long ed   : 1;         /* 52 */
        };
};
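
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * interface): ppn holds physical address bits 12-49, so the physical page
 * address of a translation is ppn shifted back into place.
 */
static inline unsigned long pte_flags_paddr(union pte_flags pte)
{
        return pte.ppn << 12;   /* physical address of the 4K-aligned page */
}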

union ia64_pta {
        unsigned long val;
        struct {
                unsigned long ve        : 1;
                unsigned long reserved0 : 1;
                unsigned long size      : 6;
                unsigned long vf        : 1;
                unsigned long reserved1 : 6;
                unsigned long base      : 49;
        };
};
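
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * interface): pta.size encodes the log2 of the VHPT size, so the table
 * occupies 1UL << pta.size bytes.
 */
static inline unsigned long ia64_pta_vhpt_size(union ia64_pta pta)
{
        return 1UL << pta.size;
}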

struct thash_cb {
        /* THASH base information */
        struct thash_data *hash;        /* hash table pointer */
        union ia64_pta pta;
        int num;
};

struct kvm_vcpu_stat {
        u32 halt_wakeup;
};

struct kvm_vcpu_arch {
        int launched;
        int last_exit;
        int last_run_cpu;
        int vmm_tr_slot;
        int vm_tr_slot;
        int sn_rtc_tr_slot;

#define KVM_MP_STATE_RUNNABLE           0
#define KVM_MP_STATE_UNINITIALIZED      1
#define KVM_MP_STATE_INIT_RECEIVED      2
#define KVM_MP_STATE_HALTED             3
        int mp_state;

#define MAX_PTC_G_NUM                   3
        int ptc_g_count;
        struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];

        /* halt timer to wake up sleepy vcpus */
        struct hrtimer hlt_timer;
        long ht_active;

        struct kvm_lapic *apic;         /* kernel irqchip context */
        struct vpd *vpd;

        /* Exit data for vmm_transition */
        struct exit_ctl_data exit_data;

        cpumask_t cache_coherent_map;

        unsigned long vmm_rr;
        unsigned long host_rr6;
        unsigned long psbits[8];
        unsigned long cr_iipa;
        unsigned long cr_isr;
        unsigned long vsa_base;
        unsigned long dirty_log_lock_pa;
        unsigned long __gp;
        /* TR and TC. */
        struct thash_data itrs[NITRS];
        struct thash_data dtrs[NDTRS];
        /* Bit is set if there is a tr/tc for the region. */
        unsigned char itr_regions;
        unsigned char dtr_regions;
        unsigned char tc_regions;
        /* purge all */
        unsigned long ptce_base;
        unsigned long ptce_count[2];
        unsigned long ptce_stride[2];
        /* itc/itm */
        unsigned long last_itc;
        long itc_offset;
        unsigned long itc_check;
        unsigned long timer_check;
        unsigned int timer_pending;
        unsigned int timer_fired;

        unsigned long vrr[8];
        unsigned long ibr[8];
        unsigned long dbr[8];
        unsigned long insvc[4];         /* Interrupt in service. */
        unsigned long xtp;

        unsigned long metaphysical_rr0;         /* from kvm_arch (so is pinned) */
        unsigned long metaphysical_rr4;         /* from kvm_arch (so is pinned) */
        unsigned long metaphysical_saved_rr0;   /* from kvm_arch */
        unsigned long metaphysical_saved_rr4;   /* from kvm_arch */
        unsigned long fp_psr;           /* used for lazy float register */
        unsigned long saved_gp;
        /* for physical emulation */
        int mode_flags;
        struct thash_cb vtlb;
        struct thash_cb vhpt;
        char irq_check;
        char irq_new_pending;

        unsigned long opcode;
        unsigned long cause;
        char log_buf[VMM_LOG_LEN];
        union context host;
        union context guest;

        char mmio_data[8];
};

struct kvm_vm_stat {
        u64 remote_tlb_flush;
};

struct kvm_sal_data {
        unsigned long boot_ip;
        unsigned long boot_gp;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
        spinlock_t dirty_log_lock;

        unsigned long vm_base;
        unsigned long metaphysical_rr0;
        unsigned long metaphysical_rr4;
        unsigned long vmm_init_rr;

        int is_sn2;

        struct kvm_ioapic *vioapic;
        struct kvm_vm_stat stat;
        struct kvm_sal_data rdv_sal_data;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;

        unsigned long irq_sources_bitmap;
        unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
};

union cpuid3_t {
        u64 value;
        struct {
                u64 number   : 8;
                u64 revision : 8;
                u64 model    : 8;
                u64 family   : 8;
                u64 archrev  : 8;
                u64 rv       : 24;
        };
};
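
/*
 * Field layout of CPUID register 3 as described by the bitfields above:
 * number (bits 0-7), revision (8-15), model (16-23), family (24-31),
 * archrev (32-39); the remaining bits (rv) are reserved.
 */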

struct kvm_pt_regs {
        /* The following registers are saved by SAVE_MIN: */
        unsigned long b6;               /* scratch */
        unsigned long b7;               /* scratch */

        unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
        unsigned long ar_ssd;           /* reserved for future use (scratch) */

        unsigned long r8;               /* scratch (return value register 0) */
        unsigned long r9;               /* scratch (return value register 1) */
        unsigned long r10;              /* scratch (return value register 2) */
        unsigned long r11;              /* scratch (return value register 3) */

        unsigned long cr_ipsr;          /* interrupted task's psr */
        unsigned long cr_iip;           /* interrupted task's instruction pointer */
        unsigned long cr_ifs;           /* interrupted task's function state */

        unsigned long ar_unat;          /* interrupted task's NaT register (preserved) */
        unsigned long ar_pfs;           /* prev function state */
        unsigned long ar_rsc;           /* RSE configuration */
        /* The following two are valid only if cr_ipsr.cpl > 0: */
        unsigned long ar_rnat;          /* RSE NaT */
        unsigned long ar_bspstore;      /* RSE bspstore */

        unsigned long pr;               /* 64 predicate registers (1 bit each) */
        unsigned long b0;               /* return pointer (bp) */
        unsigned long loadrs;           /* size of dirty partition << 16 */

        unsigned long r1;               /* the gp pointer */
        unsigned long r12;              /* interrupted task's memory stack pointer */
        unsigned long r13;              /* thread pointer */

        unsigned long ar_fpsr;          /* floating point status (preserved) */
        unsigned long r15;              /* scratch */

        /* The remaining registers are NOT saved for system calls. */
        unsigned long r14;              /* scratch */
        unsigned long r2;               /* scratch */
        unsigned long r3;               /* scratch */
        unsigned long r16;              /* scratch */
        unsigned long r17;              /* scratch */
        unsigned long r18;              /* scratch */
        unsigned long r19;              /* scratch */
        unsigned long r20;              /* scratch */
        unsigned long r21;              /* scratch */
        unsigned long r22;              /* scratch */
        unsigned long r23;              /* scratch */
        unsigned long r24;              /* scratch */
        unsigned long r25;              /* scratch */
        unsigned long r26;              /* scratch */
        unsigned long r27;              /* scratch */
        unsigned long r28;              /* scratch */
        unsigned long r29;              /* scratch */
        unsigned long r30;              /* scratch */
        unsigned long r31;              /* scratch */
        unsigned long ar_ccv;           /* compare/exchange value (scratch) */

        /*
         * Floating point registers that the kernel considers scratch:
         */
        struct ia64_fpreg f6;           /* scratch */
        struct ia64_fpreg f7;           /* scratch */
        struct ia64_fpreg f8;           /* scratch */
        struct ia64_fpreg f9;           /* scratch */
        struct ia64_fpreg f10;          /* scratch */
        struct ia64_fpreg f11;          /* scratch */

        unsigned long r4;               /* preserved */
        unsigned long r5;               /* preserved */
        unsigned long r6;               /* preserved */
        unsigned long r7;               /* preserved */
        unsigned long eml_unat;         /* used for emulating instruction */
        unsigned long pad0;             /* alignment pad */
};

static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
{
        return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
}
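
/*
 * Usage sketch (illustrative only): vcpu_regs() returns the guest register
 * frame saved at the top of the vcpu's stack area, so e.g. the result of an
 * emulated call can be placed in the guest's r8 (return value register 0)
 * with:
 *
 *      vcpu_regs(vcpu)->r8 = value;
 */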

typedef int kvm_vmm_entry(void);
typedef void kvm_tramp_entry(union context *host, union context *guest);

struct kvm_vmm_info {
        struct module *module;
        kvm_vmm_entry *vmm_entry;
        kvm_tramp_entry *tramp_entry;
        unsigned long vmm_ivt;
        unsigned long patch_mov_ar;
        unsigned long patch_mov_ar_sn2;
};

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);

#define __KVM_HAVE_ARCH_VM_ALLOC 1
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
                struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                const struct kvm_memory_slot *old,
                enum kvm_mr_change change) {}
static inline void kvm_arch_hardware_unsetup(void) {}

#endif /* __ASSEMBLY__ */

#endif /* __ASM_KVM_HOST_H */