/arch/powerpc/kvm/
D | booke_emulate.c |
     26   vcpu->arch.regs.nip = vcpu->arch.shared->srr0;   in kvmppc_emul_rfi()
     27   kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);   in kvmppc_emul_rfi()
     32   vcpu->arch.regs.nip = vcpu->arch.dsrr0;   in kvmppc_emul_rfdi()
     33   kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);   in kvmppc_emul_rfdi()
     38   vcpu->arch.regs.nip = vcpu->arch.csrr0;   in kvmppc_emul_rfci()
     39   kvmppc_set_msr(vcpu, vcpu->arch.csrr1);   in kvmppc_emul_rfci()
     80   kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);   in kvmppc_booke_emulate_op()
     90   vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)   in kvmppc_booke_emulate_op()
     96   vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)   in kvmppc_booke_emulate_op()
    127   vcpu->arch.shared->dar = spr_val;   in kvmppc_booke_emulate_mtspr()
    [all …]

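The booke_emulate.c hits show the common shape of the return-from-interrupt family: each variant reloads the program counter from the first register of its save/restore pair and the MSR from the second (SRR0/SRR1 for rfi, DSRR0/DSRR1 for rfdi, CSRR0/CSRR1 for rfci). A minimal sketch of that pattern, using a simplified stand-in struct rather than the kernel's real vcpu types:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, pared-down vcpu state; the kernel keeps these fields
     * in vcpu->arch and vcpu->arch.shared. */
    struct vcpu_state {
        uint64_t nip;          /* next instruction pointer */
        uint64_t msr;          /* machine state register */
        uint64_t srr0, srr1;   /* PC and MSR saved at interrupt entry */
    };

    /* rfi: resume at SRR0 with the MSR that was saved in SRR1.  The real
     * code routes the MSR write through kvmppc_set_msr() so its side
     * effects are applied. */
    static void emul_rfi(struct vcpu_state *v)
    {
        v->nip = v->srr0;
        v->msr = v->srr1;
    }

    int main(void)
    {
        struct vcpu_state v = { .nip = 0x100, .srr0 = 0xc000, .srr1 = 0x8000 };
        emul_rfi(&v);
        printf("nip=%#llx msr=%#llx\n",
               (unsigned long long)v.nip, (unsigned long long)v.msr);
        return 0;
    }

rfdi and rfci follow identically, using the debug and critical save/restore pairs.
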
D | book3s_hv_tm.c |
     19   u64 msr = vcpu->arch.shregs.msr;   in emulate_tx_failure()
     21   tfiar = vcpu->arch.regs.nip & ~0x3ull;   in emulate_tx_failure()
     23   if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))   in emulate_tx_failure()
     29   vcpu->arch.tfiar = tfiar;   in emulate_tx_failure()
     31   vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;   in emulate_tx_failure()
     44   u32 instr = vcpu->arch.emul_inst;   in kvmhv_p9_tm_emulation()
     45   u64 msr = vcpu->arch.shregs.msr;   in kvmhv_p9_tm_emulation()
     63   newmsr = vcpu->arch.shregs.srr1;   in kvmhv_p9_tm_emulation()
     69   vcpu->arch.shregs.msr = newmsr;   in kvmhv_p9_tm_emulation()
     70   vcpu->arch.cfar = vcpu->arch.regs.nip - 4;   in kvmhv_p9_tm_emulation()
    [all …]

D | book3s_hv_tm_builtin.c |
     22   u32 instr = vcpu->arch.emul_inst;   in kvmhv_p9_tm_emulation_early()
     40   newmsr = vcpu->arch.shregs.srr1;   in kvmhv_p9_tm_emulation_early()
     45   vcpu->arch.shregs.msr = newmsr;   in kvmhv_p9_tm_emulation_early()
     46   vcpu->arch.cfar = vcpu->arch.regs.nip - 4;   in kvmhv_p9_tm_emulation_early()
     47   vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;   in kvmhv_p9_tm_emulation_early()
     52   msr = vcpu->arch.shregs.msr;   in kvmhv_p9_tm_emulation_early()
     53   if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))   in kvmhv_p9_tm_emulation_early()
     56   if (!(vcpu->arch.hfscr & HFSCR_EBB) ||   in kvmhv_p9_tm_emulation_early()
     68   vcpu->arch.shregs.msr = msr;   in kvmhv_p9_tm_emulation_early()
     69   vcpu->arch.cfar = vcpu->arch.regs.nip - 4;   in kvmhv_p9_tm_emulation_early()
    [all …]

D | timing.c |
     27   mutex_lock(&vcpu->arch.exit_timing_lock);   in kvmppc_init_timing_stats()
     29   vcpu->arch.last_exit_type = 0xDEAD;   in kvmppc_init_timing_stats()
     31   vcpu->arch.timing_count_type[i] = 0;   in kvmppc_init_timing_stats()
     32   vcpu->arch.timing_max_duration[i] = 0;   in kvmppc_init_timing_stats()
     33   vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;   in kvmppc_init_timing_stats()
     34   vcpu->arch.timing_sum_duration[i] = 0;   in kvmppc_init_timing_stats()
     35   vcpu->arch.timing_sum_quad_duration[i] = 0;   in kvmppc_init_timing_stats()
     37   vcpu->arch.timing_last_exit = 0;   in kvmppc_init_timing_stats()
     38   vcpu->arch.timing_exit.tv64 = 0;   in kvmppc_init_timing_stats()
     39   vcpu->arch.timing_last_enter.tv64 = 0;   in kvmppc_init_timing_stats()
    [all …]

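The timing.c hits are the exit-timing reset path: every per-exit-type counter is cleared while exit_timing_lock is held, so a concurrent reader never sees a half-reset table, and the minimum is seeded with 0xFFFFFFFF so the first recorded duration always replaces it. A sketch of the same discipline with POSIX threads (array size and names are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdint.h>

    #define N_EXIT_TYPES 32   /* illustrative; the kernel defines its own count */

    struct exit_timing {
        pthread_mutex_t lock;
        uint32_t count[N_EXIT_TYPES];
        uint64_t min[N_EXIT_TYPES], max[N_EXIT_TYPES];
        uint64_t sum[N_EXIT_TYPES], sum_quad[N_EXIT_TYPES];
    };

    static void init_timing_stats(struct exit_timing *t)
    {
        pthread_mutex_lock(&t->lock);
        for (int i = 0; i < N_EXIT_TYPES; i++) {
            t->count[i] = 0;
            t->max[i] = 0;
            t->min[i] = 0xFFFFFFFF;  /* sentinel: first sample is always smaller */
            t->sum[i] = 0;
            t->sum_quad[i] = 0;      /* sum of squares, for variance */
        }
        pthread_mutex_unlock(&t->lock);
    }

    int main(void)
    {
        struct exit_timing t = { .lock = PTHREAD_MUTEX_INITIALIZER };
        init_timing_stats(&t);
        return 0;
    }
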
D | emulate_loadstore.c |
     86   vcpu->arch.mmio_vsx_copy_nums = 0;   in kvmppc_emulate_loadstore()
     87   vcpu->arch.mmio_vsx_offset = 0;   in kvmppc_emulate_loadstore()
     88   vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;   in kvmppc_emulate_loadstore()
     89   vcpu->arch.mmio_sp64_extend = 0;   in kvmppc_emulate_loadstore()
     90   vcpu->arch.mmio_sign_extend = 0;   in kvmppc_emulate_loadstore()
     91   vcpu->arch.mmio_vmx_copy_nums = 0;   in kvmppc_emulate_loadstore()
     92   vcpu->arch.mmio_vmx_offset = 0;   in kvmppc_emulate_loadstore()
     93   vcpu->arch.mmio_host_swabbed = 0;   in kvmppc_emulate_loadstore()
     96   vcpu->arch.regs.msr = vcpu->arch.shared->msr;   in kvmppc_emulate_loadstore()
     97   if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {   in kvmppc_emulate_loadstore()
    [all …]

D | book3s_hv.c |
    134   return kvm->arch.nested_enable && kvm_is_radix(kvm);   in nesting_enabled()
    241   cpu = READ_ONCE(vcpu->arch.thread_cpu);   in kvmppc_fast_vcpu_kick_hv()
    307   struct kvmppc_vcore *vc = vcpu->arch.vcore;   in kvmppc_core_vcpu_load_hv()
    319   spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);   in kvmppc_core_vcpu_load_hv()
    320   if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&   in kvmppc_core_vcpu_load_hv()
    321   vcpu->arch.busy_preempt != TB_NIL) {   in kvmppc_core_vcpu_load_hv()
    322   vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;   in kvmppc_core_vcpu_load_hv()
    323   vcpu->arch.busy_preempt = TB_NIL;   in kvmppc_core_vcpu_load_hv()
    325   spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);   in kvmppc_core_vcpu_load_hv()
    330   struct kvmppc_vcore *vc = vcpu->arch.vcore;   in kvmppc_core_vcpu_put_hv()
    [all …]

D | booke.c |
     68   printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,   in kvmppc_dump_vcpu()
     69   vcpu->arch.shared->msr);   in kvmppc_dump_vcpu()
     70   printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,   in kvmppc_dump_vcpu()
     71   vcpu->arch.regs.ctr);   in kvmppc_dump_vcpu()
     72   printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,   in kvmppc_dump_vcpu()
     73   vcpu->arch.shared->srr1);   in kvmppc_dump_vcpu()
     75   printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);   in kvmppc_dump_vcpu()
     93   vcpu->arch.shadow_msr &= ~MSR_SPE;   in kvmppc_vcpu_disable_spe()
    103   vcpu->arch.shadow_msr |= MSR_SPE;   in kvmppc_vcpu_enable_spe()
    109   if (vcpu->arch.shared->msr & MSR_SPE) {   in kvmppc_vcpu_sync_spe()
    [all …]

D | e500_emulate.c |
     53   ulong param = vcpu->arch.regs.gpr[rb];   in kvmppc_e500_emul_msgclr()
     59   clear_bit(prio, &vcpu->arch.pending_exceptions);   in kvmppc_e500_emul_msgclr()
     65   ulong param = vcpu->arch.regs.gpr[rb];   in kvmppc_e500_emul_msgsnd()
     75   int cpir = cvcpu->arch.shared->pir;   in kvmppc_e500_emul_msgsnd()
     77   set_bit(prio, &cvcpu->arch.pending_exceptions);   in kvmppc_e500_emul_msgsnd()
     94   vcpu->run->debug.arch.address = vcpu->arch.regs.nip;   in kvmppc_e500_emul_ehpriv()
     95   vcpu->run->debug.arch.status = 0;   in kvmppc_e500_emul_ehpriv()
    225   vcpu->arch.shared->mas0 = spr_val;   in kvmppc_core_emulate_mtspr_e500()
    228   vcpu->arch.shared->mas1 = spr_val;   in kvmppc_core_emulate_mtspr_e500()
    231   vcpu->arch.shared->mas2 = spr_val;   in kvmppc_core_emulate_mtspr_e500()
    [all …]

D | book3s_emulate.c |
     77   if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))   in spr_allowed()
     90   memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],   in kvmppc_copyto_vcpu_tm()
     91   sizeof(vcpu->arch.gpr_tm));   in kvmppc_copyto_vcpu_tm()
     92   memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,   in kvmppc_copyto_vcpu_tm()
     94   memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,   in kvmppc_copyto_vcpu_tm()
     96   vcpu->arch.ppr_tm = vcpu->arch.ppr;   in kvmppc_copyto_vcpu_tm()
     97   vcpu->arch.dscr_tm = vcpu->arch.dscr;   in kvmppc_copyto_vcpu_tm()
     98   vcpu->arch.amr_tm = vcpu->arch.amr;   in kvmppc_copyto_vcpu_tm()
     99   vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;   in kvmppc_copyto_vcpu_tm()
    100   vcpu->arch.tar_tm = vcpu->arch.tar;   in kvmppc_copyto_vcpu_tm()
    [all …]

D | e500mc.c |
    102   vcpu->arch.pid = pid;   in kvmppc_set_pid()
    119   mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);   in kvmppc_core_vcpu_load_e500mc()
    121   mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);   in kvmppc_core_vcpu_load_e500mc()
    122   vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);   in kvmppc_core_vcpu_load_e500mc()
    123   vcpu->arch.epsc = vcpu->arch.eplc;   in kvmppc_core_vcpu_load_e500mc()
    124   mtspr(SPRN_EPLC, vcpu->arch.eplc);   in kvmppc_core_vcpu_load_e500mc()
    125   mtspr(SPRN_EPSC, vcpu->arch.epsc);   in kvmppc_core_vcpu_load_e500mc()
    127   mtspr(SPRN_GIVPR, vcpu->arch.ivpr);   in kvmppc_core_vcpu_load_e500mc()
    128   mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);   in kvmppc_core_vcpu_load_e500mc()
    129   mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);   in kvmppc_core_vcpu_load_e500mc()
    [all …]

D | book3s_pr.c |
     82   if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)   in kvmppc_fixup_split_real()
     89   vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;   in kvmppc_fixup_split_real()
     95   if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {   in kvmppc_unfixup_split_real()
    102   vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;   in kvmppc_unfixup_split_real()
    114   new_msr = vcpu->arch.intr_msr;   in kvmppc_inject_interrupt_pr()
    148   current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;   in kvmppc_core_vcpu_load_pr()
    189   svcpu->gpr[0] = vcpu->arch.regs.gpr[0];   in kvmppc_copy_to_svcpu()
    190   svcpu->gpr[1] = vcpu->arch.regs.gpr[1];   in kvmppc_copy_to_svcpu()
    191   svcpu->gpr[2] = vcpu->arch.regs.gpr[2];   in kvmppc_copy_to_svcpu()
    192   svcpu->gpr[3] = vcpu->arch.regs.gpr[3];   in kvmppc_copy_to_svcpu()
    [all …]

D | book3s_hv_nested.c |
     30   struct kvmppc_vcore *vc = vcpu->arch.vcore;   in kvmhv_save_hv_regs()
     34   hr->hfscr = vcpu->arch.hfscr;   in kvmhv_save_hv_regs()
     36   hr->dawr0 = vcpu->arch.dawr;   in kvmhv_save_hv_regs()
     37   hr->dawrx0 = vcpu->arch.dawrx;   in kvmhv_save_hv_regs()
     38   hr->ciabr = vcpu->arch.ciabr;   in kvmhv_save_hv_regs()
     39   hr->purr = vcpu->arch.purr;   in kvmhv_save_hv_regs()
     40   hr->spurr = vcpu->arch.spurr;   in kvmhv_save_hv_regs()
     41   hr->ic = vcpu->arch.ic;   in kvmhv_save_hv_regs()
     43   hr->srr0 = vcpu->arch.shregs.srr0;   in kvmhv_save_hv_regs()
     44   hr->srr1 = vcpu->arch.shregs.srr1;   in kvmhv_save_hv_regs()
    [all …]

/arch/mips/kvm/
D | emulate.c |
     46   struct kvm_vcpu_arch *arch = &vcpu->arch;   in kvm_compute_return_epc() local
     66   arch->gprs[insn.r_format.rd] = epc + 8;   in kvm_compute_return_epc()
     69   nextpc = arch->gprs[insn.r_format.rs];   in kvm_compute_return_epc()
     85   if ((long)arch->gprs[insn.i_format.rs] < 0)   in kvm_compute_return_epc()
     94   if ((long)arch->gprs[insn.i_format.rs] >= 0)   in kvm_compute_return_epc()
    103   arch->gprs[31] = epc + 8;   in kvm_compute_return_epc()
    104   if ((long)arch->gprs[insn.i_format.rs] < 0)   in kvm_compute_return_epc()
    113   arch->gprs[31] = epc + 8;   in kvm_compute_return_epc()
    114   if ((long)arch->gprs[insn.i_format.rs] >= 0)   in kvm_compute_return_epc()
    142   arch->gprs[31] = instpc + 8;   in kvm_compute_return_epc()
    [all …]

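kvm_compute_return_epc() re-implements MIPS branch semantics to decide where the guest resumes: link-type branches write GPR 31 with epc + 8 (past the delay slot), and a taken branch targets the delay-slot address plus the sign-extended offset shifted left by 2. A simplified model of one bltzal-style case (the register file and signature are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative register file; the kernel keeps this in vcpu->arch.gprs. */
    static int64_t gprs[32];

    /* Branch-and-link if rs < 0: the delay-slot instruction at epc + 4
     * still executes, so the return address is epc + 8. */
    static uint64_t compute_return_epc_bltzal(uint64_t epc, int rs, int16_t offset)
    {
        gprs[31] = epc + 8;                           /* link past the delay slot */
        if (gprs[rs] < 0)
            return epc + 4 + ((int64_t)offset << 2);  /* taken: branch target */
        return epc + 8;                               /* not taken: fall through */
    }

    int main(void)
    {
        gprs[4] = -1;
        printf("%#llx\n",
               (unsigned long long)compute_return_epc_bltzal(0x1000, 4, 16));
        return 0;
    }
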
D | interrupt.c |
     26   set_bit(priority, &vcpu->arch.pending_exceptions);   in kvm_mips_queue_irq()
     31   clear_bit(priority, &vcpu->arch.pending_exceptions);   in kvm_mips_dequeue_irq()
     41   kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));   in kvm_mips_queue_timer_int_cb()
     50   kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));   in kvm_mips_dequeue_timer_int_cb()
     64   kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));   in kvm_mips_queue_io_int_cb()
     73   kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));   in kvm_mips_dequeue_io_int_cb()
     84   struct kvm_vcpu_arch *arch = &vcpu->arch;   in kvm_mips_irq_deliver_cb() local
     85   struct mips_coproc *cop0 = vcpu->arch.cop0;   in kvm_mips_irq_deliver_cb()
    102   kvm_write_c0_guest_epc(cop0, arch->pc);   in kvm_mips_irq_deliver_cb()
    110   kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);   in kvm_mips_irq_deliver_cb()
    [all …]

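interrupt.c keeps guest interrupt state as one word of priority bits: queuing sets a bit in pending_exceptions, dequeuing clears it, and the delivery path scans the word for work. The same idea in portable C, with C11 atomics standing in for the kernel's set_bit()/clear_bit():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_ulong pending_exceptions;

    static void queue_irq(int priority)
    {
        atomic_fetch_or(&pending_exceptions, 1UL << priority);
    }

    static void dequeue_irq(int priority)
    {
        atomic_fetch_and(&pending_exceptions, ~(1UL << priority));
    }

    static bool irq_pending(int priority)
    {
        return atomic_load(&pending_exceptions) & (1UL << priority);
    }

    int main(void)
    {
        queue_irq(3);
        printf("pending(3)=%d\n", irq_pending(3));
        dequeue_irq(3);
        printf("pending(3)=%d\n", irq_pending(3));
        return 0;
    }
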
/arch/s390/kvm/
D | guestdbg.c |
     62   u64 *cr9 = &vcpu->arch.sie_block->gcr[9];   in enable_all_hw_bp()
     63   u64 *cr10 = &vcpu->arch.sie_block->gcr[10];   in enable_all_hw_bp()
     64   u64 *cr11 = &vcpu->arch.sie_block->gcr[11];   in enable_all_hw_bp()
     67   if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||   in enable_all_hw_bp()
     68   vcpu->arch.guestdbg.hw_bp_info == NULL)   in enable_all_hw_bp()
     79   for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {   in enable_all_hw_bp()
     80   start = vcpu->arch.guestdbg.hw_bp_info[i].addr;   in enable_all_hw_bp()
     81   len = vcpu->arch.guestdbg.hw_bp_info[i].len;   in enable_all_hw_bp()
    102   u64 *cr9 = &vcpu->arch.sie_block->gcr[9];   in enable_all_hw_wp()
    103   u64 *cr10 = &vcpu->arch.sie_block->gcr[10];   in enable_all_hw_wp()
    [all …]

D | kvm-s390.c |
    285   kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);   in kvm_clock_sync()
    287   kvm->arch.epoch = vcpu->arch.sie_block->epoch;   in kvm_clock_sync()
    288   kvm->arch.epdx = vcpu->arch.sie_block->epdx;   in kvm_clock_sync()
    290   if (vcpu->arch.cputm_enabled)   in kvm_clock_sync()
    291   vcpu->arch.cputm_start += *delta;   in kvm_clock_sync()
    292   if (vcpu->arch.vsie_block)   in kvm_clock_sync()
    293   kvm_clock_sync_scb(vcpu->arch.vsie_block,   in kvm_clock_sync()
    597   struct gmap *gmap = kvm->arch.gmap;   in kvm_arch_sync_dirty_log()
    680   kvm->arch.use_irqchip = 1;   in kvm_vm_ioctl_enable_cap()
    685   kvm->arch.user_sigp = 1;   in kvm_vm_ioctl_enable_cap()
    [all …]

D | pv.c |
     32   free_pages(vcpu->arch.pv.stor_base,   in kvm_s390_pv_destroy_cpu()
     35   free_page(sida_origin(vcpu->arch.sie_block));   in kvm_s390_pv_destroy_cpu()
     36   vcpu->arch.sie_block->pv_handle_cpu = 0;   in kvm_s390_pv_destroy_cpu()
     37   vcpu->arch.sie_block->pv_handle_config = 0;   in kvm_s390_pv_destroy_cpu()
     38   memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));   in kvm_s390_pv_destroy_cpu()
     39   vcpu->arch.sie_block->sdf = 0;   in kvm_s390_pv_destroy_cpu()
     45   vcpu->arch.sie_block->gbea = 1;   in kvm_s390_pv_destroy_cpu()
     62   vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,   in kvm_s390_pv_create_cpu()
     64   if (!vcpu->arch.pv.stor_base)   in kvm_s390_pv_create_cpu()
     69   uvcb.num = vcpu->arch.sie_block->icpua;   in kvm_s390_pv_create_cpu()
    [all …]

D | intercept.c |
     27   struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;   in kvm_s390_get_ilen()
     30   switch (vcpu->arch.sie_block->icptcode) {   in kvm_s390_get_ilen()
     37   ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);   in kvm_s390_get_ilen()
     47   ilen = vcpu->arch.sie_block->pgmilc & 0x6;   in kvm_s390_get_ilen()
     55   struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;   in handle_stop()
     93   int viwhy = vcpu->arch.sie_block->ipb >> 16;   in handle_validity()
    110   vcpu->arch.sie_block->ipa,   in handle_instruction()
    111   vcpu->arch.sie_block->ipb);   in handle_instruction()
    113   switch (vcpu->arch.sie_block->ipa >> 8) {   in handle_instruction()
    146   .code = vcpu->arch.sie_block->iprcc,   in inject_prog_on_prog_intercept()
    [all …]

/arch/arm64/kvm/
D | debug.c |
     40   vcpu->arch.guest_debug_preserved.mdscr_el1 = val;   in save_guest_debug_regs()
     43   vcpu->arch.guest_debug_preserved.mdscr_el1);   in save_guest_debug_regs()
     48   u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;   in restore_guest_debug_regs()
     89   vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;   in kvm_arm_setup_mdcr_el2()
     90   vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |   in kvm_arm_setup_mdcr_el2()
    100   vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;   in kvm_arm_setup_mdcr_el2()
    109   !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))   in kvm_arm_setup_mdcr_el2()
    110   vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;   in kvm_arm_setup_mdcr_el2()
    112   trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);   in kvm_arm_setup_mdcr_el2()
    135   vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;   in kvm_arm_reset_debug_ptr()
    [all …]

D | fpsimd.c |
     46   if (vcpu->arch.sve_state) {   in kvm_arch_vcpu_run_map_fp()
     49   sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);   in kvm_arch_vcpu_run_map_fp()
     51   ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,   in kvm_arch_vcpu_run_map_fp()
     57   vcpu->arch.host_thread_info = kern_hyp_va(ti);   in kvm_arch_vcpu_run_map_fp()
     58   vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);   in kvm_arch_vcpu_run_map_fp()
     77   vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |   in kvm_arch_vcpu_load_fp()
     80   vcpu->arch.flags |= KVM_ARM64_FP_HOST;   in kvm_arch_vcpu_load_fp()
     83   vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;   in kvm_arch_vcpu_load_fp()
     86   vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;   in kvm_arch_vcpu_load_fp()
     99   if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {   in kvm_arch_vcpu_ctxsync_fp()
    [all …]

/arch/x86/tools/
D | Makefile |
     16   reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
     17   chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
     32   …st.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86…
     34   …STCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srct…
     37   …arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(…
     39   …arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(…

/arch/x86/kernel/
D | machine_kexec_32.c |
     56   free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);   in machine_kexec_free_page_tables()
     57   image->arch.pgd = NULL;   in machine_kexec_free_page_tables()
     59   free_page((unsigned long)image->arch.pmd0);   in machine_kexec_free_page_tables()
     60   image->arch.pmd0 = NULL;   in machine_kexec_free_page_tables()
     61   free_page((unsigned long)image->arch.pmd1);   in machine_kexec_free_page_tables()
     62   image->arch.pmd1 = NULL;   in machine_kexec_free_page_tables()
     64   free_page((unsigned long)image->arch.pte0);   in machine_kexec_free_page_tables()
     65   image->arch.pte0 = NULL;   in machine_kexec_free_page_tables()
     66   free_page((unsigned long)image->arch.pte1);   in machine_kexec_free_page_tables()
     67   image->arch.pte1 = NULL;   in machine_kexec_free_page_tables()
    [all …]

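machine_kexec_free_page_tables() frees each page-table level and immediately NULLs the stored pointer, so a repeated call, or a later error path, cannot double-free. The same discipline in portable C, with free() standing in for the kernel's free_page()/free_pages() and an illustrative struct in place of image->arch:

    #include <stdlib.h>

    struct kexec_page_tables {
        void *pgd;
        void *pmd0, *pmd1;
        void *pte0, *pte1;
    };

    /* Free each level and clear the pointer so a second call is a no-op. */
    static void free_page_tables(struct kexec_page_tables *pt)
    {
        free(pt->pgd);  pt->pgd  = NULL;
        free(pt->pmd0); pt->pmd0 = NULL;
        free(pt->pmd1); pt->pmd1 = NULL;
        free(pt->pte0); pt->pte0 = NULL;
        free(pt->pte1); pt->pte1 = NULL;
    }

    int main(void)
    {
        struct kexec_page_tables pt = { 0 };
        pt.pgd = malloc(4096);
        free_page_tables(&pt);
        free_page_tables(&pt);  /* safe: every pointer is already NULL */
        return 0;
    }
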
/arch/powerpc/include/asm/
D | kvm_book3s_64.h |
    141   return kvm->arch.radix;   in kvm_is_radix()
    148   if (vcpu->arch.nested)   in kvmhv_vcpu_is_radix()
    149   radix = vcpu->arch.nested->radix;   in kvmhv_vcpu_is_radix()
    527   if (atomic_read(&kvm->arch.hpte_mod_interest))   in note_hpte_modification()
    591   vcpu->arch.regs.ccr = vcpu->arch.cr_tm;   in copy_from_checkpoint()
    592   vcpu->arch.regs.xer = vcpu->arch.xer_tm;   in copy_from_checkpoint()
    593   vcpu->arch.regs.link = vcpu->arch.lr_tm;   in copy_from_checkpoint()
    594   vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;   in copy_from_checkpoint()
    595   vcpu->arch.amr = vcpu->arch.amr_tm;   in copy_from_checkpoint()
    596   vcpu->arch.ppr = vcpu->arch.ppr_tm;   in copy_from_checkpoint()
    [all …]

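copy_from_checkpoint() rolls the live register file back to the values checkpointed when the transaction began: each *_tm shadow field overwrites its live counterpart. A pared-down sketch, with the field set trimmed to the hits above and illustrative types:

    #include <stdint.h>

    struct vcpu_tm_state {
        /* live registers */
        uint64_t ccr, xer, link, ctr, amr, ppr;
        /* checkpointed copies taken at transaction begin */
        uint64_t cr_tm, xer_tm, lr_tm, ctr_tm, amr_tm, ppr_tm;
    };

    /* Restore the register file to the checkpointed values. */
    static void copy_from_checkpoint(struct vcpu_tm_state *v)
    {
        v->ccr  = v->cr_tm;
        v->xer  = v->xer_tm;
        v->link = v->lr_tm;
        v->ctr  = v->ctr_tm;
        v->amr  = v->amr_tm;
        v->ppr  = v->ppr_tm;
    }

    int main(void)
    {
        struct vcpu_tm_state v = { .ccr = 1, .cr_tm = 7 };
        copy_from_checkpoint(&v);
        return v.ccr == 7 ? 0 : 1;
    }
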
/arch/powerpc/kernel/
D | asm-offsets.c |
    455   OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);   in main()
    456   OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);   in main()
    457   OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);   in main()
    458   OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);   in main()
    459   OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);   in main()
    460   OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);   in main()
    462   OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);   in main()
    464   OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);   in main()
    465   OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);   in main()
    466   OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);   in main()
    [all …]

/arch/riscv/kernel/
D | module-sections.c |
     15   struct mod_section *got_sec = &mod->arch.got;   in module_emit_got_entry()
     34   struct mod_section *got_plt_sec = &mod->arch.got_plt;   in module_emit_plt_entry()
     36   struct mod_section *plt_sec = &mod->arch.plt;   in module_emit_plt_entry()
    102   mod->arch.plt.shdr = sechdrs + i;   in module_frob_arch_sections()
    104   mod->arch.got.shdr = sechdrs + i;   in module_frob_arch_sections()
    106   mod->arch.got_plt.shdr = sechdrs + i;   in module_frob_arch_sections()
    109   if (!mod->arch.plt.shdr) {   in module_frob_arch_sections()
    113   if (!mod->arch.got.shdr) {   in module_frob_arch_sections()
    117   if (!mod->arch.got_plt.shdr) {   in module_frob_arch_sections()
    138   mod->arch.plt.shdr->sh_type = SHT_NOBITS;   in module_frob_arch_sections()
    [all …]

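module_frob_arch_sections() makes one pass over the module's ELF section headers, caching pointers to the .plt, .got and .got.plt sections and rejecting the module if any is missing; the emit helpers hit above then append entries through those cached shdr pointers. A self-contained sketch of the scan, with structures simplified from the real ELF/module types:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for an ELF section header. */
    struct shdr { const char *name; };

    struct mod_arch {
        struct shdr *plt, *got, *got_plt;
    };

    /* Scan section headers, recording the sections the PLT/GOT code needs. */
    static int frob_arch_sections(struct mod_arch *arch,
                                  struct shdr *sechdrs, int num)
    {
        for (int i = 0; i < num; i++) {
            if (!strcmp(sechdrs[i].name, ".plt"))
                arch->plt = &sechdrs[i];
            else if (!strcmp(sechdrs[i].name, ".got"))
                arch->got = &sechdrs[i];
            else if (!strcmp(sechdrs[i].name, ".got.plt"))
                arch->got_plt = &sechdrs[i];
        }
        if (!arch->plt || !arch->got || !arch->got_plt) {
            fprintf(stderr, "missing PLT/GOT section\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct shdr sec[] = { { ".text" }, { ".plt" }, { ".got" }, { ".got.plt" } };
        struct mod_arch arch = { 0 };
        return frob_arch_sections(&arch, sec, 4) ? 1 : 0;
    }
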