
Searched refs:msr (Results 1 – 25 of 179) sorted by relevance


/arch/x86/kernel/cpu/
perfctr-watchdog.c
45 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) in nmi_perfctr_msr_to_bit() argument
50 if (msr >= MSR_F15H_PERF_CTR) in nmi_perfctr_msr_to_bit()
51 return (msr - MSR_F15H_PERF_CTR) >> 1; in nmi_perfctr_msr_to_bit()
52 return msr - MSR_K7_PERFCTR0; in nmi_perfctr_msr_to_bit()
55 return msr - MSR_ARCH_PERFMON_PERFCTR0; in nmi_perfctr_msr_to_bit()
59 return msr - MSR_P6_PERFCTR0; in nmi_perfctr_msr_to_bit()
61 return msr - MSR_KNC_PERFCTR0; in nmi_perfctr_msr_to_bit()
63 return msr - MSR_P4_BPU_PERFCTR0; in nmi_perfctr_msr_to_bit()
73 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) in nmi_evntsel_msr_to_bit() argument
78 if (msr >= MSR_F15H_PERF_CTL) in nmi_evntsel_msr_to_bit()
[all …]
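
These helpers fold a performance-counter MSR address down to a small bit index so one bitmask can track which counters are reserved. A minimal sketch of the AMD branch only, assuming the usual msr-index.h values MSR_F15H_PERF_CTR = 0xc0010201 and MSR_K7_PERFCTR0 = 0xc0010004 (treat both constants as assumptions; the real function also handles the Intel P6/P4/KNC bases):

    #include <stdint.h>

    #define SK_MSR_F15H_PERF_CTR 0xc0010201u  /* assumed msr-index.h value */
    #define SK_MSR_K7_PERFCTR0   0xc0010004u  /* assumed msr-index.h value */

    static unsigned int amd_perfctr_msr_to_bit(uint32_t msr)
    {
        if (msr >= SK_MSR_F15H_PERF_CTR)
            /* Fam15h interleaves PERF_CTL/PERF_CTR MSRs, so divide by two. */
            return (msr - SK_MSR_F15H_PERF_CTR) >> 1;
        /* Older AMD counters are contiguous starting at PERFCTR0. */
        return msr - SK_MSR_K7_PERFCTR0;
    }
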
/arch/x86/lib/
msr.c
9 struct msr *msrs_alloc(void) in msrs_alloc()
11 struct msr *msrs = NULL; in msrs_alloc()
13 msrs = alloc_percpu(struct msr); in msrs_alloc()
23 void msrs_free(struct msr *msrs) in msrs_free()
39 int msr_read(u32 msr, struct msr *m) in msr_read() argument
44 err = rdmsrl_safe(msr, &val); in msr_read()
57 int msr_write(u32 msr, struct msr *m) in msr_write() argument
59 return wrmsrl_safe(msr, m->q); in msr_write()
62 static inline int __flip_bit(u32 msr, u8 bit, bool set) in __flip_bit() argument
64 struct msr m, m1; in __flip_bit()
[all …]
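
msr_read()/msr_write() above are thin in-kernel wrappers around rdmsrl_safe()/wrmsrl_safe() that stash the 64-bit value in a struct msr. From userspace the same registers are reachable through the msr character device; a minimal sketch, assuming the standard /dev/cpu/N/msr interface and using IA32_TSC (0x10) purely as an example index:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* needs root and the msr module */

        if (fd < 0) {
            perror("open /dev/cpu/0/msr");
            return 1;
        }
        /* The MSR index is passed as the file offset; each read is 8 bytes. */
        if (pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
            perror("pread");
            close(fd);
            return 1;
        }
        printf("MSR 0x10 = %#018llx\n", (unsigned long long)val);
        close(fd);
        return 0;
    }
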
/arch/x86/include/asm/
msr-trace.h
3 #define TRACE_SYSTEM msr
6 #define TRACE_INCLUDE_FILE msr-trace
22 TP_PROTO(unsigned msr, u64 val, int failed),
23 TP_ARGS(msr, val, failed),
25 __field( unsigned, msr )
30 __entry->msr = msr;
35 __entry->msr,
41 TP_PROTO(unsigned msr, u64 val, int failed),
42 TP_ARGS(msr, val, failed)
46 TP_PROTO(unsigned msr, u64 val, int failed),
[all …]
msr.h
14 struct msr { struct
26 struct msr reg; argument
27 struct msr *msrs;
74 extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
75 extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
76 extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
79 static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {} in do_trace_write_msr() argument
80 static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} in do_trace_read_msr() argument
81 static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} in do_trace_rdpmc() argument
91 static inline unsigned long long notrace __rdmsr(unsigned int msr) in __rdmsr() argument
[all …]
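
__rdmsr() ultimately issues the rdmsr instruction, which takes the MSR index in ECX and returns the value split across EDX:EAX. A minimal sketch of that shape (ring-0 only; the real kernel helper additionally wires up an exception-table fixup, omitted here):

    static inline unsigned long long rdmsr_sketch(unsigned int msr)
    {
        unsigned int lo, hi;

        /* rdmsr: ECX = MSR index in, EDX:EAX = 64-bit value out. */
        asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
        return ((unsigned long long)hi << 32) | lo;
    }
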
microcode.h
9 #define native_rdmsr(msr, val1, val2) \ argument
11 u64 __val = __rdmsr((msr)); \
16 #define native_wrmsr(msr, low, high) \ argument
17 __wrmsr(msr, low, high)
19 #define native_wrmsrl(msr, val) \ argument
20 __wrmsr((msr), (u32)((u64)(val)), \
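
native_wrmsrl() is the write-side counterpart: wrmsr consumes the value as two 32-bit halves in EAX (low) and EDX (high), so the macro splits the 64-bit argument before handing it to __wrmsr(). A hedged sketch of that split:

    static inline void wrmsr_sketch(unsigned int msr, unsigned long long val)
    {
        unsigned int lo = (unsigned int)val;
        unsigned int hi = (unsigned int)(val >> 32);

        /* wrmsr: ECX = MSR index, EDX:EAX = 64-bit value. Ring-0 only. */
        asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
    }
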
/arch/m68k/bvme6000/
config.c
160 unsigned char msr; in bvme6000_timer_int() local
163 msr = rtc->msr & 0xc0; in bvme6000_timer_int()
164 rtc->msr = msr | 0x20; /* Ack the interrupt */ in bvme6000_timer_int()
183 unsigned char msr = rtc->msr & 0xc0; in bvme6000_sched_init() local
185 rtc->msr = 0; /* Ensure timer registers accessible */ in bvme6000_sched_init()
195 rtc->msr = 0x40; /* Access int.cntrl, etc */ in bvme6000_sched_init()
200 rtc->msr = 0; /* Access timer 1 control */ in bvme6000_sched_init()
203 rtc->msr = msr; in bvme6000_sched_init()
224 unsigned char msr = rtc->msr & 0xc0; in bvme6000_gettimeoffset() local
228 rtc->msr = 0; /* Ensure timer registers accessible */ in bvme6000_gettimeoffset()
[all …]
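
Note that on this m68k board "msr" is the mode/status register of the on-board RTC chip, not an x86 model-specific register. The code above preserves the two status bits, selects a register bank, and restores the old value afterwards; a small sketch under that assumption (rtc_msr is a hypothetical pointer to the memory-mapped register):

    static void rtc_bank_access(volatile unsigned char *rtc_msr)
    {
        unsigned char saved = *rtc_msr & 0xc0;  /* keep the two status bits */

        *rtc_msr = 0x40;                        /* select the control bank  */
        /* ... access interrupt-control / timer registers here ... */
        *rtc_msr = saved;                       /* restore the previous bank */
    }
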
rtc.c
42 unsigned char msr; in rtc_ioctl() local
52 msr = rtc->msr & 0xc0; in rtc_ioctl()
53 rtc->msr = 0x40; in rtc_ioctl()
66 rtc->msr = msr; in rtc_ioctl()
108 msr = rtc->msr & 0xc0; in rtc_ioctl()
109 rtc->msr = 0x40; in rtc_ioctl()
123 rtc->msr = msr; in rtc_ioctl()
/arch/microblaze/kernel/
process.c
47 regs->msr, regs->ear, regs->esr, regs->fsr); in show_regs()
72 local_save_flags(childregs->msr); in copy_thread()
74 ti->cpu_context.msr = childregs->msr & ~MSR_IE; in copy_thread()
86 ti->cpu_context.msr = (unsigned long)childregs->msr; in copy_thread()
88 childregs->msr |= MSR_UMS; in copy_thread()
100 childregs->msr &= ~MSR_EIP; in copy_thread()
101 childregs->msr |= MSR_IE; in copy_thread()
102 childregs->msr &= ~MSR_VM; in copy_thread()
103 childregs->msr |= MSR_VMS; in copy_thread()
104 childregs->msr |= MSR_EE; /* exceptions will be enabled*/ in copy_thread()
[all …]
setup.c
100 unsigned int fdt, unsigned int msr, unsigned int tlb0, in machine_early_init() argument
165 if (msr) { in machine_early_init()
167 pr_cont("CPU don't have it %x\n", msr); in machine_early_init()
170 if (!msr) { in machine_early_init()
172 pr_cont("CPU have it %x\n", msr); in machine_early_init()
/arch/x86/kvm/
mtrr.c
29 static bool msr_mtrr_valid(unsigned msr) in msr_mtrr_valid() argument
31 switch (msr) { in msr_mtrr_valid()
61 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) in kvm_mtrr_valid() argument
66 if (!msr_mtrr_valid(msr)) in kvm_mtrr_valid()
69 if (msr == MSR_IA32_CR_PAT) { in kvm_mtrr_valid()
74 } else if (msr == MSR_MTRRdefType) { in kvm_mtrr_valid()
78 } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { in kvm_mtrr_valid()
86 WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); in kvm_mtrr_valid()
89 if ((msr & 1) == 0) { in kvm_mtrr_valid()
196 static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) in fixed_msr_to_seg_unit() argument
[all …]
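
The WARN_ON and the (msr & 1) test above rely on the variable-range MTRRs being laid out as PhysBase/PhysMask pairs starting at MSR 0x200. A sketch of that decoding, assuming eight variable ranges (the usual KVM_NR_VAR_MTRR value; treat the constant as an assumption):

    #include <stdbool.h>
    #include <stdint.h>

    #define MTRR_PHYSBASE0 0x200u
    #define NR_VAR_MTRR    8        /* assumed KVM_NR_VAR_MTRR */

    /* Returns true for a variable-range MTRR MSR and reports which range it
     * belongs to and whether it is the base (even) or mask (odd) half. */
    static bool decode_var_mtrr(uint32_t msr, unsigned int *range, bool *is_base)
    {
        if (msr < MTRR_PHYSBASE0 || msr >= MTRR_PHYSBASE0 + 2 * NR_VAR_MTRR)
            return false;

        *range = (msr - MTRR_PHYSBASE0) / 2;
        *is_base = (msr & 1) == 0;
        return true;
    }
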
pmu.h
28 bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
29 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
83 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc() argument
86 if (msr >= base && msr < base + pmu->nr_arch_gp_counters) { in get_gp_pmc()
87 u32 index = array_index_nospec(msr - base, in get_gp_pmc()
97 static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) in get_fixed_pmc() argument
101 if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) { in get_fixed_pmc()
102 u32 index = array_index_nospec(msr - base, in get_fixed_pmc()
119 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
120 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
pmu_amd.c
94 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in amd_is_valid_msr() argument
99 ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) || in amd_is_valid_msr()
100 get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0); in amd_is_valid_msr()
105 static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in amd_pmu_get_msr() argument
111 pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0); in amd_pmu_get_msr()
117 pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0); in amd_pmu_get_msr()
130 u32 msr = msr_info->index; in amd_pmu_set_msr() local
134 pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0); in amd_pmu_set_msr()
140 pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0); in amd_pmu_set_msr()
pmu_intel.c
153 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in intel_is_valid_msr() argument
158 switch (msr) { in intel_is_valid_msr()
166 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || in intel_is_valid_msr()
167 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || in intel_is_valid_msr()
168 get_fixed_pmc(pmu, msr); in intel_is_valid_msr()
175 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in intel_pmu_get_msr() argument
180 switch (msr) { in intel_pmu_get_msr()
194 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
195 (pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
198 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
[all …]
hyperv.c
197 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) in synic_exit() argument
203 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
212 u32 msr, u64 data, bool host) in synic_set_msr() argument
220 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); in synic_set_msr()
223 switch (msr) { in synic_set_msr()
227 synic_exit(synic, msr); in synic_set_msr()
246 synic_exit(synic, msr); in synic_set_msr()
258 synic_exit(synic, msr); in synic_set_msr()
268 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); in synic_set_msr()
277 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata) in synic_get_msr() argument
[all …]
/arch/x86/xen/
pmu.c
125 static inline bool is_amd_pmu_msr(unsigned int msr) in is_amd_pmu_msr() argument
127 if ((msr >= MSR_F15H_PERF_CTL && in is_amd_pmu_msr()
128 msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) || in is_amd_pmu_msr()
129 (msr >= MSR_K7_EVNTSEL0 && in is_amd_pmu_msr()
130 msr < MSR_K7_PERFCTR0 + amd_num_counters)) in is_amd_pmu_msr()
182 static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type, in xen_intel_pmu_emulate() argument
198 switch (msr) { in xen_intel_pmu_emulate()
236 if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL) in xen_intel_pmu_emulate()
245 static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) in xen_amd_pmu_emulate() argument
258 ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3))) in xen_amd_pmu_emulate()
[all …]
/arch/powerpc/kernel/
signal_64.c
111 unsigned long msr = regs->msr; in setup_sigcontext() local
128 msr |= MSR_VEC; in setup_sigcontext()
151 msr &= ~MSR_VSX; in setup_sigcontext()
165 msr |= MSR_VSX; in setup_sigcontext()
171 err |= __put_user(msr, &sc->gp_regs[PT_MSR]); in setup_sigcontext()
210 unsigned long msr = tsk->thread.ckpt_regs.msr; in setup_tm_sigcontexts() local
215 BUG_ON(!MSR_TM_ACTIVE(regs->msr)); in setup_tm_sigcontexts()
222 regs->msr &= ~MSR_TS_MASK; in setup_tm_sigcontexts()
236 if (msr & MSR_VEC) in setup_tm_sigcontexts()
248 msr |= MSR_VEC; in setup_tm_sigcontexts()
[all …]
process.c
89 MSR_TM_ACTIVE(tsk->thread.regs->msr) && in check_if_tm_restore_required()
91 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; in check_if_tm_restore_required()
96 static inline bool msr_tm_active(unsigned long msr) in msr_tm_active() argument
98 return MSR_TM_ACTIVE(msr); in msr_tm_active()
101 static inline bool msr_tm_active(unsigned long msr) { return false; } in msr_tm_active() argument
155 unsigned long msr; in __giveup_fpu() local
158 msr = tsk->thread.regs->msr; in __giveup_fpu()
159 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); in __giveup_fpu()
162 msr &= ~MSR_VSX; in __giveup_fpu()
164 tsk->thread.regs->msr = msr; in __giveup_fpu()
[all …]
signal_32.c
413 unsigned long msr = regs->msr; in save_user_regs() local
431 msr |= MSR_VEC; in save_user_regs()
453 msr &= ~MSR_VSX; in save_user_regs()
465 msr |= MSR_VSX; in save_user_regs()
477 msr |= MSR_SPE; in save_user_regs()
486 if (__put_user(msr, &frame->mc_gregs[PT_MSR])) in save_user_regs()
520 unsigned long msr = regs->msr; in save_tm_user_regs() local
527 regs->msr &= ~MSR_TS_MASK; in save_tm_user_regs()
540 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR])) in save_tm_user_regs()
549 if (msr & MSR_VEC) { in save_tm_user_regs()
[all …]
/arch/arm64/mm/
proc.S
102 msr tpidr_el0, x2
103 msr tpidrro_el0, x3
104 msr contextidr_el1, x4
105 msr cpacr_el1, x6
111 msr tcr_el1, x8
112 msr vbar_el1, x9
121 msr mdscr_el1, x10
123 msr sctlr_el1, x12
125 msr tpidr_el1, x13
127 msr tpidr_el2, x13
[all …]
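
In the arm64 hits, msr is the AArch64 instruction that moves a general-purpose register into a system register (mrs is the read direction), not a C identifier. A minimal inline-assembly sketch of the same operation from C, using tpidr_el1 only as an example register and assuming EL1 context:

    static inline unsigned long read_tpidr_el1(void)
    {
        unsigned long v;

        asm volatile("mrs %0, tpidr_el1" : "=r" (v));   /* system reg -> GPR */
        return v;
    }

    static inline void write_tpidr_el1(unsigned long v)
    {
        asm volatile("msr tpidr_el1, %0" : : "r" (v));  /* GPR -> system reg */
    }
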
/arch/arm64/kvm/
hyp-init.S
66 msr ttbr0_el2, x0
99 msr tcr_el2, x4
102 msr mair_el2, x4
116 msr sctlr_el2, x4
123 msr vbar_el2, x2
127 msr tpidr_el2, x1
138 msr elr_el2, x1
140 msr spsr_el2, x0
159 msr sctlr_el2, x5
164 msr vbar_el2, x5
/arch/powerpc/include/asm/
runlatch.h
22 unsigned long msr = mfmsr(); \
25 if (msr & MSR_EE) \
34 unsigned long msr = mfmsr(); \
37 if (msr & MSR_EE) \
/arch/x86/events/
msr.c
94 u64 msr; member
107 static struct perf_msr msr[] = { variable
164 if (!msr[cfg].attr) in msr_event_init()
168 event->hw.event_base = msr[cfg].msr; in msr_event_init()
259 if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val)) in msr_init()
260 msr[i].attr = NULL; in msr_init()
265 if (msr[i].attr) in msr_init()
266 events_attrs[j++] = &msr[i].attr->attr.attr; in msr_init()
/arch/x86/power/
cpu.c
40 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_save_context() local
41 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_save_context()
43 while (msr < end) { in msr_save_context()
44 msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); in msr_save_context()
45 msr++; in msr_save_context()
51 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_restore_context() local
52 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_restore_context()
54 while (msr < end) { in msr_restore_context()
55 if (msr->valid) in msr_restore_context()
56 wrmsrl(msr->info.msr_no, msr->info.reg.q); in msr_restore_context()
[all …]
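
msr_save_context()/msr_restore_context() walk a table of MSRs around suspend/resume, recording which ones were actually readable so only those are written back. A condensed kernel-context sketch of that pattern, with a hypothetical saved_msr_entry type standing in for the kernel's struct saved_msr (assumes linux/types.h and asm/msr.h):

    struct saved_msr_entry {
        u32  msr_no;
        u64  value;
        bool valid;
    };

    static void save_msrs(struct saved_msr_entry *tbl, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            /* rdmsrl_safe() returns 0 on success; failed reads are marked
             * invalid and silently skipped on restore. */
            tbl[i].valid = !rdmsrl_safe(tbl[i].msr_no, &tbl[i].value);
    }

    static void restore_msrs(struct saved_msr_entry *tbl, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            if (tbl[i].valid)
                wrmsrl(tbl[i].msr_no, tbl[i].value);
    }
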
/arch/arm64/kernel/
head.S
327 msr sp_el0, x5 // Save thread_info
330 msr vbar_el1, x8 // vector table address
387 msr SPsel, #1 // We want to use SP_EL{1,2}
392 msr sctlr_el1, x0
398 msr sctlr_el2, x0
417 msr hcr_el2, x0
433 msr cnthctl_el2, x0
435 msr cntvoff_el2, xzr // Clear virtual offset
458 msr vpidr_el2, x0
459 msr vmpidr_el2, x1
[all …]
/arch/arm/kernel/
fiqasm.S
28 msr cpsr_c, r2 @ select FIQ mode
33 msr cpsr_c, r1 @ return to SVC mode
41 msr cpsr_c, r2 @ select FIQ mode
46 msr cpsr_c, r1 @ return to SVC mode
