
Searched refs:hw (Results 1 – 25 of 76) sorted by relevance


/arch/powerpc/perf/
core-fsl-emb.c
159 if (event->hw.state & PERF_HES_STOPPED) in fsl_emb_pmu_read()
168 prev = local64_read(&event->hw.prev_count); in fsl_emb_pmu_read()
170 val = read_pmc(event->hw.idx); in fsl_emb_pmu_read()
171 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in fsl_emb_pmu_read()
176 local64_sub(delta, &event->hw.period_left); in fsl_emb_pmu_read()
280 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) in fsl_emb_pmu_add()
297 event->hw.idx = i; in fsl_emb_pmu_add()
302 if (event->hw.sample_period) { in fsl_emb_pmu_add()
303 s64 left = local64_read(&event->hw.period_left); in fsl_emb_pmu_add()
307 local64_set(&event->hw.prev_count, val); in fsl_emb_pmu_add()
[all …]
core-book3s.c
455 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_read()
458 if (!event->hw.idx) in power_pmu_read()
466 prev = local64_read(&event->hw.prev_count); in power_pmu_read()
468 val = read_pmc(event->hw.idx); in power_pmu_read()
472 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in power_pmu_read()
475 local64_sub(delta, &event->hw.period_left); in power_pmu_read()
498 if (!event->hw.idx) in freeze_limited_counters()
500 val = (event->hw.idx == 5) ? pmc5 : pmc6; in freeze_limited_counters()
501 prev = local64_read(&event->hw.prev_count); in freeze_limited_counters()
502 event->hw.idx = 0; in freeze_limited_counters()
[all …]
/arch/sh/kernel/
perf_event.c
124 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
231 struct hw_perf_event *hwc = &event->hw; in sh_pmu_stop()
234 if (!(event->hw.state & PERF_HES_STOPPED)) { in sh_pmu_stop()
237 event->hw.state |= PERF_HES_STOPPED; in sh_pmu_stop()
240 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { in sh_pmu_stop()
241 sh_perf_event_update(event, &event->hw, idx); in sh_pmu_stop()
242 event->hw.state |= PERF_HES_UPTODATE; in sh_pmu_stop()
249 struct hw_perf_event *hwc = &event->hw; in sh_pmu_start()
256 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in sh_pmu_start()
259 event->hw.state = 0; in sh_pmu_start()
[all …]
/arch/blackfin/kernel/
perf_event.c
304 struct hw_perf_event *hwc = &event->hw; in bfin_pmu_stop()
307 if (!(event->hw.state & PERF_HES_STOPPED)) { in bfin_pmu_stop()
310 event->hw.state |= PERF_HES_STOPPED; in bfin_pmu_stop()
313 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { in bfin_pmu_stop()
314 bfin_perf_event_update(event, &event->hw, idx); in bfin_pmu_stop()
315 event->hw.state |= PERF_HES_UPTODATE; in bfin_pmu_stop()
322 struct hw_perf_event *hwc = &event->hw; in bfin_pmu_start()
329 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in bfin_pmu_start()
332 event->hw.state = 0; in bfin_pmu_start()
341 __clear_bit(event->hw.idx, cpuc->used_mask); in bfin_pmu_del()
[all …]
/arch/arm/kernel/
bios32.c
413 static void __init pcibios_init_hw(struct hw_pci *hw) in pcibios_init_hw() argument
419 for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { in pcibios_init_hw()
425 sys->domain = hw->domain; in pcibios_init_hw()
427 sys->hw = hw; in pcibios_init_hw()
429 sys->swizzle = hw->swizzle; in pcibios_init_hw()
430 sys->map_irq = hw->map_irq; in pcibios_init_hw()
433 ret = hw->setup(nr, sys); in pcibios_init_hw()
443 sys->bus = hw->scan(nr, sys); in pcibios_init_hw()
450 list_add(&sys->node, &hw->buses); in pcibios_init_hw()
459 void __init pci_common_init(struct hw_pci *hw) in pci_common_init() argument
[all …]
perf_event.c
212 struct hw_perf_event *hwc = &event->hw; in armpmu_read()
225 struct hw_perf_event *hwc = &event->hw; in armpmu_stop()
243 struct hw_perf_event *hwc = &event->hw; in armpmu_start()
269 struct hw_perf_event *hwc = &event->hw; in armpmu_del()
286 struct hw_perf_event *hwc = &event->hw; in armpmu_add()
303 event->hw.idx = idx; in armpmu_add()
324 struct hw_perf_event fake_event = event->hw; in validate_event()
487 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
/arch/s390/kernel/
perf_cpum_cf.c
322 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
435 prev = local64_read(&event->hw.prev_count); in hw_perf_event_reset()
436 err = ecctr(event->hw.config, &new); in hw_perf_event_reset()
447 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in hw_perf_event_reset()
458 prev = local64_read(&event->hw.prev_count); in hw_perf_event_update()
459 err = ecctr(event->hw.config, &new); in hw_perf_event_update()
462 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in hw_perf_event_update()
473 if (event->hw.state & PERF_HES_STOPPED) in cpumf_pmu_read()
482 struct hw_perf_event *hwc = &event->hw; in cpumf_pmu_start()
513 struct hw_perf_event *hwc = &event->hw; in cpumf_pmu_stop()
[all …]
/arch/alpha/kernel/
perf_event.c
196 event[0]->hw.idx = idx0; in ev67_check_constraints()
197 event[0]->hw.config_base = config; in ev67_check_constraints()
199 event[1]->hw.idx = idx0 ^ 1; in ev67_check_constraints()
200 event[1]->hw.config_base = config; in ev67_check_constraints()
341 evtype[n] = group->hw.event_base; in collect_events()
349 evtype[n] = pe->hw.event_base; in collect_events()
393 cpuc->current_idx[j] != pe->hw.idx) { in maybe_change_configuration()
394 alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); in maybe_change_configuration()
403 struct hw_perf_event *hwc = &pe->hw; in maybe_change_configuration()
414 cpuc->config = cpuc->event[0]->hw.config_base; in maybe_change_configuration()
[all …]
/arch/c6x/kernel/
irq.c
80 irq_hw_number_t hw) in core_domain_map() argument
82 if (hw < 4 || hw >= NR_PRIORITY_IRQS) in core_domain_map()
/arch/x86/kernel/cpu/
perf_event_intel.c
779 intel_pmu_enable_bts(event->hw.config); in intel_pmu_enable_all()
850 __x86_pmu_enable_event(&event->hw, in intel_pmu_nhm_workaround()
892 struct hw_perf_event *hwc = &event->hw; in intel_pmu_disable_event()
955 struct hw_perf_event *hwc = &event->hw; in intel_pmu_enable_event()
1091 data.period = event->hw.last_period; in intel_pmu_handle_irq()
1115 struct hw_perf_event *hwc = &event->hw; in intel_bts_constraints()
1135 if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { in intel_try_alt_er()
1136 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; in intel_try_alt_er()
1137 event->hw.config |= 0x01bb; in intel_try_alt_er()
1138 event->hw.extra_reg.idx = EXTRA_REG_RSP_1; in intel_try_alt_er()
[all …]
perf_event.c
71 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_update()
120 reg = &event->hw.extra_reg; in x86_pmu_extra_regs()
298 struct hw_perf_event *hwc = &event->hw; in x86_setup_perfctr()
437 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; in x86_pmu_hw_config()
443 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; in x86_pmu_hw_config()
445 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; in x86_pmu_hw_config()
448 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; in x86_pmu_hw_config()
481 event->hw.idx = -1; in __x86_pmu_event_init()
482 event->hw.last_cpu = -1; in __x86_pmu_event_init()
483 event->hw.last_tag = ~0ULL; in __x86_pmu_event_init()
[all …]
perf_event_amd.c
151 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | in amd_pmu_hw_config()
154 event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; in amd_pmu_hw_config()
156 event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; in amd_pmu_hw_config()
161 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; in amd_pmu_hw_config()
189 struct hw_perf_event *hwc = &event->hw; in amd_put_event_constraints()
254 struct hw_perf_event *hwc = &event->hw; in amd_get_event_constraints()
528 struct hw_perf_event *hwc = &event->hw; in amd_get_event_constraints_f15h()
perf_event_p6.c
68 struct hw_perf_event *hwc = &event->hw; in p6_pmu_disable_event()
80 struct hw_perf_event *hwc = &event->hw; in p6_pmu_enable_event()
perf_event_intel_ds.c
320 data.period = event->hw.last_period; in intel_pmu_drain_bts_buffer()
343 event->hw.interrupts++; in intel_pmu_drain_bts_buffer()
427 if ((event->hw.config & c->cmask) == c->code) in intel_pebs_constraints()
438 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_enable()
448 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_disable()
568 data.period = event->hw.last_period; in __intel_pmu_pebs_event()
/arch/sparc/kernel/
perf_event.c
686 cpuc->current_idx[i] != cp->hw.idx) { in maybe_change_configuration()
687 sparc_perf_event_update(cp, &cp->hw, in maybe_change_configuration()
696 struct hw_perf_event *hwc = &cp->hw; in maybe_change_configuration()
738 cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; in sparc_pmu_enable()
782 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in sparc_pmu_start()
783 sparc_perf_event_set_period(event, &event->hw, idx); in sparc_pmu_start()
786 event->hw.state = 0; in sparc_pmu_start()
788 sparc_pmu_enable_event(cpuc, &event->hw, idx); in sparc_pmu_start()
796 if (!(event->hw.state & PERF_HES_STOPPED)) { in sparc_pmu_stop()
797 sparc_pmu_disable_event(cpuc, &event->hw, idx); in sparc_pmu_stop()
[all …]
/arch/powerpc/platforms/cell/
spider-pic.c
120 unsigned int hw = irqd_to_hwirq(d); in spider_set_irq_type() local
121 void __iomem *cfg = spider_get_irq_config(pic, hw); in spider_set_irq_type()
127 (hw < 47 || hw > 50)) in spider_set_irq_type()
158 out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); in spider_set_irq_type()
172 irq_hw_number_t hw) in spider_host_map() argument
beat_interrupt.c
137 irq_hw_number_t hw) in beatic_pic_host_map() argument
141 err = beat_construct_and_connect_irq_plug(virq, hw); in beatic_pic_host_map()
/arch/arm/mach-footbridge/
Makefile
20 obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o
22 obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o
/arch/arm/mach-at91/
irq.c
166 irq_hw_number_t hw) in at91_aic_irq_map() argument
169 at91_aic_write(AT91_AIC_SVR(hw), virq); in at91_aic_irq_map()
172 at91_aic_write(AT91_AIC_SMR(hw), AT91_AIC_SRCTYPE_LOW); in at91_aic_irq_map()
/arch/powerpc/sysdev/
i8259.c
172 irq_hw_number_t hw) in i8259_host_map() argument
174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); in i8259_host_map()
177 if (hw == 2) in i8259_host_map()
mpc8xx_pic.c
90 irq_hw_number_t hw) in mpc8xx_pic_host_map() argument
92 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); in mpc8xx_pic_host_map()
/arch/powerpc/sysdev/xics/
xics-common.c
326 irq_hw_number_t hw) in xics_host_map() argument
330 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); in xics_host_map()
333 irq_radix_revmap_insert(xics_host, virq, hw); in xics_host_map()
339 if (hw == XICS_IPI) { in xics_host_map()
/arch/arm/vfp/
vfphw.S
84 cmp r4, r10 @ this thread owns the hw context?
86 @ For UP, checking that this thread owns the hw context is
100 beq vfp_reload_hw @ then the hw state needs reloading
116 @ For SMP, if this thread does not own the hw context, then we
/arch/microblaze/kernel/
intc.c
101 int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) in xintc_map() argument
105 if (intr_mask & (1 << hw)) { in xintc_map()
/arch/mips/kernel/
perf_event_mipsxx.c
426 struct hw_perf_event *hwc = &event->hw; in mipspmu_start()
442 struct hw_perf_event *hwc = &event->hw; in mipspmu_stop()
456 struct hw_perf_event *hwc = &event->hw; in mipspmu_add()
473 event->hw.idx = idx; in mipspmu_add()
492 struct hw_perf_event *hwc = &event->hw; in mipspmu_del()
506 struct hw_perf_event *hwc = &event->hw; in mipspmu_read()
717 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) in validate_group()
721 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) in validate_group()
725 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) in validate_group()
737 struct hw_perf_event *hwc = &event->hw; in handle_associated_event()
[all …]
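
Most of the perf hits above follow one pattern: a PMU callback reaches the architecture-neutral hardware state through event->hw (a struct hw_perf_event) and updates its counters with a lock-free cmpxchg retry loop, as in fsl_emb_pmu_read() and power_pmu_read(). The sketch below is a user-space analogue of that read/update idiom, not kernel code: C11 atomics stand in for the kernel's local64_* helpers, and read_pmc() is a hypothetical counter source.

/* User-space sketch of the counter-read idiom seen in fsl_emb_pmu_read():
 * re-read the counter until prev_count can be swapped atomically, then
 * account the delta against the sampling period. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct hw_state {
	_Atomic uint64_t prev_count;   /* last raw value read from the counter */
	_Atomic int64_t  period_left;  /* events left before the next sample */
	uint64_t         count;        /* accumulated event count */
};

/* Hypothetical stand-in for the per-architecture counter read. */
static uint64_t read_pmc(void)
{
	static uint64_t fake = 1000;
	return fake += 7;
}

static void pmu_read(struct hw_state *hw)
{
	uint64_t prev, val;

	do {
		prev = atomic_load(&hw->prev_count);
		val  = read_pmc();
	} while (!atomic_compare_exchange_strong(&hw->prev_count, &prev, val));

	hw->count += val - prev;
	atomic_fetch_sub(&hw->period_left, (int64_t)(val - prev));
}

int main(void)
{
	struct hw_state hw = { .prev_count = 1000, .period_left = 10000 };

	pmu_read(&hw);
	printf("count=%" PRIu64 " period_left=%" PRId64 "\n",
	       hw.count, (int64_t)atomic_load(&hw.period_left));
	return 0;
}

The retry loop ensures the delta is computed against the same prev_count that was actually swapped in, so a concurrent reader can never account the same interval twice.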
