/arch/x86/events/intel/

uncore_nhmex.c
  in nhmex_mbox_get_shared_reg():
    555  struct intel_uncore_extra_reg *er;   (local)
    561  er = &box->shared_regs[idx];
    562  raw_spin_lock_irqsave(&er->lock, flags);
    563  if (!atomic_read(&er->ref) || er->config == config) {
    564  atomic_inc(&er->ref);
    565  er->config = config;
    568  raw_spin_unlock_irqrestore(&er->lock, flags);
    586  er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
    588  raw_spin_lock_irqsave(&er->lock, flags);
    590  if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
    [all …]

uncore_snbep.c
  in snbep_cbox_put_constraint():
    781  struct intel_uncore_extra_reg *er = &box->shared_regs[0];   (local)
    789  atomic_sub(1 << (i * 6), &er->ref);
  in __snbep_cbox_get_constraint():
    799  struct intel_uncore_extra_reg *er = &box->shared_regs[0];   (local)
    807  raw_spin_lock_irqsave(&er->lock, flags);
    815  if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
    816  !((reg1->config ^ er->config) & mask)) {
    817  atomic_add(1 << (i * 6), &er->ref);
    818  er->config &= ~mask;
    819  er->config |= reg1->config & mask;
    825  raw_spin_unlock_irqrestore(&er->lock, flags);
    [all …]

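The snbep hits show several reference counters packed into one atomic word, six bits per sub-counter, so counter i is taken by adding 1 << (i * 6) and read back by shifting and masking. Below is a minimal userspace sketch of that packing trick; packed_get() is an illustrative stand-in for the kernel's __BITS_VALUE(), not the real helper.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_BITS 6

    /* Extract sub-counter `idx` from the packed word. */
    static unsigned int packed_get(atomic_uint *ref, unsigned int idx)
    {
        return (atomic_load(ref) >> (idx * FIELD_BITS)) &
               ((1u << FIELD_BITS) - 1);
    }

    int main(void)
    {
        atomic_uint ref = 0;

        atomic_fetch_add(&ref, 1u << (2 * FIELD_BITS)); /* take a ref on field 2 */
        atomic_fetch_add(&ref, 1u << (2 * FIELD_BITS)); /* and another */
        atomic_fetch_add(&ref, 1u << (0 * FIELD_BITS)); /* one ref on field 0 */

        printf("field 0: %u\n", packed_get(&ref, 0));   /* 1 */
        printf("field 2: %u\n", packed_get(&ref, 2));   /* 2 */

        atomic_fetch_sub(&ref, 1u << (2 * FIELD_BITS)); /* put one field-2 ref */
        printf("field 2: %u\n", packed_get(&ref, 2));   /* 1 */
        return 0;
    }
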
uncore.c
  in uncore_get_constraint():
    127  struct intel_uncore_extra_reg *er;   (local)
    142  er = &box->shared_regs[reg1->idx];
    143  raw_spin_lock_irqsave(&er->lock, flags);
    144  if (!atomic_read(&er->ref) ||
    145  (er->config1 == reg1->config && er->config2 == reg2->config)) {
    146  atomic_inc(&er->ref);
    147  er->config1 = reg1->config;
    148  er->config2 = reg2->config;
    151  raw_spin_unlock_irqrestore(&er->lock, flags);
  in uncore_put_constraint():
    164  struct intel_uncore_extra_reg *er;   (local)
    [all …]

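The uncore_get_constraint() hits show the sharing rule for these extra registers: under the lock, a register may be claimed either when nobody holds it (ref == 0) or when the caller wants the exact configuration already programmed; claiming bumps the refcount. A minimal userspace sketch of that rule follows, with a pthread mutex standing in for the kernel's raw spinlock and hypothetical type and function names.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct shared_reg {
        pthread_mutex_t lock;   /* stands in for the raw spinlock */
        atomic_int ref;         /* events currently using this register */
        uint64_t config;        /* configuration programmed while ref > 0 */
    };

    /* Claim the register for `config`; returns true on success. */
    static bool shared_reg_get(struct shared_reg *er, uint64_t config)
    {
        bool ok = false;

        pthread_mutex_lock(&er->lock);
        if (atomic_load(&er->ref) == 0 || er->config == config) {
            atomic_fetch_add(&er->ref, 1);
            er->config = config;
            ok = true;
        }
        pthread_mutex_unlock(&er->lock);
        return ok;
    }

    /* Drop one reference. */
    static void shared_reg_put(struct shared_reg *er)
    {
        atomic_fetch_sub(&er->ref, 1);
    }

    int main(void)
    {
        struct shared_reg er = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("claim 0xf0: %d\n", shared_reg_get(&er, 0xf0)); /* 1: free */
        printf("claim 0xf0: %d\n", shared_reg_get(&er, 0xf0)); /* 1: same config */
        printf("claim 0x0f: %d\n", shared_reg_get(&er, 0x0f)); /* 0: busy */
        shared_reg_put(&er);
        shared_reg_put(&er);
        return 0;
    }
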
core.c
  in intel_pmu_init():
   3999  struct extra_reg *er;   (local)
   4543  for (er = x86_pmu.extra_regs; er->msr; er++) {
   4544  er->extra_msr_access = check_msr(er->msr, 0x11UL);
   4546  if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)

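These intel_pmu_init() hits probe each entry of the msr-sentinel-terminated extra_regs table once at init and cache the verdict in extra_msr_access, so the hot lookup path never touches an MSR that faulted. A sketch of that probe-and-cache step; probe_msr() is a hypothetical stand-in for the kernel's fault-tolerant check_msr() test.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct extra_reg {
        uint32_t msr;           /* 0 terminates the table */
        bool extra_msr_access;  /* cached probe result */
    };

    /* Hypothetical probe: pretend only MSR 0x1a6 is usable. */
    static bool probe_msr(uint32_t msr)
    {
        return msr == 0x1a6;
    }

    int main(void)
    {
        struct extra_reg regs[] = { { 0x1a6 }, { 0x1a7 }, { 0 } };
        struct extra_reg *er;

        /* Walk until the sentinel entry, like the kernel loop. */
        for (er = regs; er->msr; er++)
            er->extra_msr_access = probe_msr(er->msr);

        for (er = regs; er->msr; er++)
            printf("msr %#x: %s\n", er->msr,
                   er->extra_msr_access ? "usable" : "disabled");
        return 0;
    }
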
/arch/mips/kvm/

emulate.c
  in kvm_mips_emul_eret():
    948  enum emulation_result er = EMULATE_DONE;   (local)
    962  er = EMULATE_FAIL;
    965  return er;
  in kvm_mips_emulate_CP0():
   1270  enum emulation_result er = EMULATE_DONE;   (local)
   1279  er = update_pc(vcpu, cause);
   1280  if (er == EMULATE_FAIL)
   1281  return er;
   1286  er = kvm_mips_emul_tlbr(vcpu);
   1289  er = kvm_mips_emul_tlbwi(vcpu);
   1292  er = kvm_mips_emul_tlbwr(vcpu);
   [all …]

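The kvm_mips_emulate_CP0() hits show the usual enum emulation_result control flow: advance the guest PC first, return early if that fails, then let the specific sub-emulator's result propagate out. A compact sketch of the same flow; every helper below is an illustrative stand-in, not one of the kernel's functions.

    #include <stdio.h>

    enum emulation_result {
        EMULATE_DONE,   /* handled completely */
        EMULATE_FAIL,   /* can't emulate; give up */
    };

    static enum emulation_result update_pc(int bad_branch)
    {
        /* Stepping past a branch delay slot can itself fail. */
        return bad_branch ? EMULATE_FAIL : EMULATE_DONE;
    }

    static enum emulation_result emul_tlbwi(void) { return EMULATE_DONE; }

    static enum emulation_result emulate_cp0(int op, int bad_branch)
    {
        enum emulation_result er = EMULATE_DONE;

        /* Step the guest PC first; bail out early if that fails. */
        er = update_pc(bad_branch);
        if (er == EMULATE_FAIL)
            return er;

        switch (op) {
        case 0:
            er = emul_tlbwi();
            break;
        default:
            er = EMULATE_FAIL;
            break;
        }
        return er;
    }

    int main(void)
    {
        printf("%d\n", emulate_cp0(0, 0)); /* 0: EMULATE_DONE */
        printf("%d\n", emulate_cp0(0, 1)); /* 1: EMULATE_FAIL */
        return 0;
    }
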
trap_emul.c
  in kvm_trap_emul_handle_cop_unusable():
     73  enum emulation_result er = EMULATE_DONE;   (local)
     84  er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
     88  er = EMULATE_DONE;
     91  er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
     94  switch (er) {
  in kvm_mips_bad_load():
    122  enum emulation_result er;   (local)
    142  er = kvm_mips_emulate_load(inst, cause, run, vcpu);
    143  if (er == EMULATE_FAIL) {
  in kvm_mips_bad_store():
    155  enum emulation_result er;   (local)
    169  er = kvm_mips_emulate_store(inst, cause, run, vcpu);
    [all …]

vz.c
  in kvm_vz_gpsi_cop0():
    906  enum emulation_result er = EMULATE_DONE;   (local)
    916  er = update_pc(vcpu, cause);
    917  if (er == EMULATE_FAIL)
    918  return er;
    923  er = kvm_mips_emul_wait(vcpu);
    926  er = EMULATE_FAIL;
    979  er = EMULATE_FAIL;
    982  if (er != EMULATE_FAIL) {
   1043  er = EMULATE_FAIL;
   1048  er = EMULATE_FAIL;
   [all …]

mips.c
  in kvm_mips_handle_exit():
   1247  enum emulation_result er = EMULATE_DONE;   (local)
   1279  er = kvm_mips_check_privilege(cause, opc, run, vcpu);
   1280  if (er == EMULATE_PRIV_FAIL) {
   1282  } else if (er == EMULATE_FAIL) {
   1404  if (er == EMULATE_DONE && !(ret & RESUME_HOST))

/arch/arm/mach-ux500/

pm.c
  in prcmu_gic_pending_irq():
     92  u32 er; /* Enable register */   (local)
     99  er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
    101  if (pr & er)
  in prcmu_copy_gic_settings():
    146  u32 er; /* Enable register */   (local)
    151  er = readl_relaxed(dist_base +
    153  writel(er, PRCM_ARMITMSK31TO0 + i * 4);

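prcmu_gic_pending_irq() treats an interrupt as relevant only when it is both pending and enabled, which reduces to ANDing the pending and enable banks word by word. A sketch of that check, with plain arrays standing in for readl_relaxed() of the GIC distributor registers.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BANKS 4   /* 4 banks x 32 bits = 128 interrupt lines */

    static bool gic_pending_irq(const uint32_t *pending, const uint32_t *enable)
    {
        for (int i = 0; i < BANKS; i++) {
            uint32_t pr = pending[i];   /* GIC_DIST_PENDING_SET + i * 4 */
            uint32_t er = enable[i];    /* GIC_DIST_ENABLE_SET  + i * 4 */

            if (pr & er)                /* pending AND enabled */
                return true;
        }
        return false;
    }

    int main(void)
    {
        uint32_t pending[BANKS] = { 0x10, 0, 0, 0 };
        uint32_t enable[BANKS]  = { 0x01, 0, 0, 0 };

        printf("%d\n", gic_pending_irq(pending, enable)); /* 0: masked */
        enable[0] |= 0x10;
        printf("%d\n", gic_pending_irq(pending, enable)); /* 1: live */
        return 0;
    }
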
/arch/powerpc/platforms/4xx/

uic.c
  in uic_unmask_irq():
     59  u32 er, sr;   (local)
     66  er = mfdcr(uic->dcrbase + UIC_ER);
     67  er |= sr;
     68  mtdcr(uic->dcrbase + UIC_ER, er);
  in uic_mask_irq():
     77  u32 er;   (local)
     80  er = mfdcr(uic->dcrbase + UIC_ER);
     81  er &= ~(1 << (31 - src));
     82  mtdcr(uic->dcrbase + UIC_ER, er);
  in uic_mask_ack_irq():
    102  u32 er, sr;   (local)
    106  er = mfdcr(uic->dcrbase + UIC_ER);
    [all …]

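The uic.c hits are a classic read-modify-write of an enable register, with the twist that the 4xx UIC numbers source 0 at the most significant bit, hence the (31 - src) shift. A small sketch, with a plain variable standing in for the mfdcr/mtdcr DCR accessors.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t uic_er;   /* stand-in for the UIC_ER enable register */

    static void uic_unmask(unsigned int src)
    {
        uic_er |= 1u << (31 - src);    /* source 0 is the MSB on the UIC */
    }

    static void uic_mask(unsigned int src)
    {
        uic_er &= ~(1u << (31 - src));
    }

    int main(void)
    {
        uic_unmask(0);
        uic_unmask(31);
        printf("%#010x\n", (unsigned int)uic_er);   /* 0x80000001 */
        uic_mask(0);
        printf("%#010x\n", (unsigned int)uic_er);   /* 0x00000001 */
        return 0;
    }
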
/arch/x86/events/

core.c
  in x86_pmu_extra_regs():
    117  struct extra_reg *er;   (local)
    124  for (er = x86_pmu.extra_regs; er->msr; er++) {
    125  if (er->event != (config & er->config_mask))
    127  if (event->attr.config1 & ~er->valid_mask)
    130  if (!er->extra_msr_access)
    133  reg->idx = er->idx;
    135  reg->reg = er->msr;

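x86_pmu_extra_regs() walks the same sentinel-terminated table at event-setup time: an entry matches when the masked config equals its event code, config1 must not set bits outside valid_mask, and entries whose MSR failed the init-time probe are skipped. A sketch of that lookup; the struct layout and values are illustrative, not the kernel's.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct extra_reg {
        uint32_t msr;            /* 0 terminates the table */
        uint64_t event;          /* event code this entry applies to */
        uint64_t config_mask;    /* bits of config that select the event */
        uint64_t valid_mask;     /* bits a user may set in config1 */
        bool extra_msr_access;   /* init-time probe result */
    };

    static const struct extra_reg extra_regs[] = {
        { 0x1a6, 0xb7, 0xff, 0xffff, true },
        { 0x1a7, 0xbb, 0xff, 0xffff, false },
        { 0 }                    /* sentinel */
    };

    /* Return the matching entry, or NULL if the request is invalid. */
    static const struct extra_reg *find_extra_reg(uint64_t config,
                                                  uint64_t config1)
    {
        const struct extra_reg *er;

        for (er = extra_regs; er->msr; er++) {
            if (er->event != (config & er->config_mask))
                continue;                /* entry is for another event */
            if (config1 & ~er->valid_mask)
                return NULL;             /* reserved bits requested */
            if (!er->extra_msr_access)
                return NULL;             /* MSR failed the init probe */
            return er;
        }
        return NULL;
    }

    int main(void)
    {
        const struct extra_reg *er = find_extra_reg(0x01b7, 0x1234);

        printf("msr: %#x\n", er ? er->msr : 0u);   /* 0x1a6 */
        return 0;
    }
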
/arch/powerpc/kvm/

book3s_pr.c
  in kvmppc_emulate_fac():
    826  enum emulation_result er = EMULATE_FAIL;   (local)
    829  er = kvmppc_emulate_instruction(vcpu->run, vcpu);
    831  if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
  in kvmppc_exit_pr_progint():
    912  enum emulation_result er;   (local)
    944  er = kvmppc_emulate_instruction(run, vcpu);
    945  switch (er) {

powerpc.c
  in kvmppc_emulate_mmio():
    291  enum emulation_result er;   (local)
    294  er = kvmppc_emulate_loadstore(vcpu);
    295  switch (er) {

booke.c
  in emulation_exit():
    806  enum emulation_result er;   (local)
    808  er = kvmppc_emulate_instruction(run, vcpu);
    809  switch (er) {

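The powerpc/kvm hits (book3s_pr.c, powerpc.c, booke.c) all funnel the emulator's return value through one switch that picks a resume action. A sketch of that dispatch with illustrative enum values; the real EMULATE_FAIL path also reports the failure to the caller (or injects a program interrupt) before resuming.

    #include <stdio.h>

    enum emulation_result { EMULATE_DONE, EMULATE_DO_MMIO, EMULATE_FAIL };
    enum resume { RESUME_GUEST, RESUME_HOST };

    static enum resume handle_emulation(enum emulation_result er)
    {
        switch (er) {
        case EMULATE_DONE:
            return RESUME_GUEST;   /* instruction fully handled */
        case EMULATE_DO_MMIO:
            return RESUME_HOST;    /* userspace must complete the MMIO */
        case EMULATE_FAIL:
        default:
            /* real code: report the failure / inject a program interrupt */
            return RESUME_HOST;
        }
    }

    int main(void)
    {
        printf("%d\n", handle_emulation(EMULATE_DONE));    /* 0 */
        printf("%d\n", handle_emulation(EMULATE_DO_MMIO)); /* 1 */
        return 0;
    }
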
/arch/powerpc/boot/dts/fsl/

e6500_power_isa.dtsi
     61  fsl,eref-er; // Enhanced Reservations (Load and Reserve and Store Cond.)

/arch/x86/kvm/

svm.c
  in ud_interception():
   2306  int er;   (local)
   2308  er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
   2309  if (er == EMULATE_USER_EXIT)
   2311  if (er != EMULATE_DONE)

mmu.c
  in kvm_mmu_page_fault():
   5044  enum emulation_result er;   (local)
   5089  er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
   5091  switch (er) {

vmx.c
  in handle_exception():
   6310  enum emulation_result er;   (local)
   6322  er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
   6323  if (er == EMULATE_USER_EXIT)
   6325  if (er != EMULATE_DONE)

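The three x86/kvm hits share one #UD-handling shape: try to emulate the faulting instruction, exit to userspace when the emulator asks for it, and re-queue the #UD to the guest when emulation fails. A sketch of that decision logic with stand-in names for the KVM internals.

    #include <stdio.h>

    enum emulation_result { EMULATE_DONE, EMULATE_USER_EXIT, EMULATE_FAIL };

    static void queue_ud_to_guest(void)
    {
        printf("re-injecting #UD into the guest\n");
    }

    /* Returns 1 to resume the guest, 0 to exit to userspace. */
    static int ud_interception(enum emulation_result er)
    {
        if (er == EMULATE_USER_EXIT)
            return 0;                /* let userspace handle it */
        if (er != EMULATE_DONE)
            queue_ud_to_guest();     /* emulation failed: guest sees #UD */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", ud_interception(EMULATE_DONE));      /* 1 */
        printf("%d\n", ud_interception(EMULATE_FAIL));      /* re-inject, 1 */
        printf("%d\n", ud_interception(EMULATE_USER_EXIT)); /* 0 */
        return 0;
    }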