Searched refs:hw (Results 1 – 25 of 154) sorted by relevance

/arch/powerpc/perf/
core-fsl-emb.c
183 if (event->hw.state & PERF_HES_STOPPED) in fsl_emb_pmu_read()
192 prev = local64_read(&event->hw.prev_count); in fsl_emb_pmu_read()
194 val = read_pmc(event->hw.idx); in fsl_emb_pmu_read()
195 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in fsl_emb_pmu_read()
200 local64_sub(delta, &event->hw.period_left); in fsl_emb_pmu_read()
304 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) in fsl_emb_pmu_add()
321 event->hw.idx = i; in fsl_emb_pmu_add()
326 if (event->hw.sample_period) { in fsl_emb_pmu_add()
327 s64 left = local64_read(&event->hw.period_left); in fsl_emb_pmu_add()
331 local64_set(&event->hw.prev_count, val); in fsl_emb_pmu_add()
[all …]
core-book3s.c
1023 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_read()
1026 if (!event->hw.idx) in power_pmu_read()
1030 val = read_pmc(event->hw.idx); in power_pmu_read()
1031 local64_set(&event->hw.prev_count, val); in power_pmu_read()
1041 prev = local64_read(&event->hw.prev_count); in power_pmu_read()
1043 val = read_pmc(event->hw.idx); in power_pmu_read()
1047 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in power_pmu_read()
1061 prev = local64_read(&event->hw.period_left); in power_pmu_read()
1065 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); in power_pmu_read()
1088 if (!event->hw.idx) in freeze_limited_counters()
[all …]
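
Both powerpc read paths above follow the same lock-free idiom: sample event->hw.prev_count, read the hardware counter, and retry with local64_cmpxchg() until no interrupt handler has moved prev_count in between; only then is the delta folded into the totals. A minimal userspace sketch of that loop using C11 atomics (pmu_read(), fake_pmc, and pmc_read_counter() are invented stand-ins, not kernel API):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t prev_count;   /* stands in for event->hw.prev_count */
    static _Atomic uint64_t event_count;  /* accumulated event total */

    /* Fake PMC: a free-running counter standing in for read_pmc(idx). */
    static _Atomic uint64_t fake_pmc;
    static uint64_t pmc_read_counter(void) { return atomic_load(&fake_pmc); }

    static void pmu_read(void)
    {
        uint64_t prev, val;

        /* Snapshot-and-retry, mirroring the local64_cmpxchg() loop in
         * fsl_emb_pmu_read()/power_pmu_read(): if prev_count changed
         * between the two reads, sample both again. */
        do {
            prev = atomic_load(&prev_count);
            val  = pmc_read_counter();
        } while (!atomic_compare_exchange_weak(&prev_count, &prev, val));

        /* The kernel code additionally masks the delta to the PMC width. */
        atomic_fetch_add(&event_count, val - prev);
    }

    int main(void)
    {
        atomic_fetch_add(&fake_pmc, 1000);  /* pretend 1000 events occurred */
        pmu_read();
        printf("count = %llu\n", (unsigned long long)atomic_load(&event_count));
        return 0;
    }
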
/arch/mips/alchemy/common/
clock.c
117 static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw, in alchemy_clk_cpu_recalc() argument
179 struct clk_hw hw; member
183 #define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
185 static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw, in alchemy_clk_aux_recalc() argument
188 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); in alchemy_clk_aux_recalc()
193 static int alchemy_clk_aux_setr(struct clk_hw *hw, in alchemy_clk_aux_setr() argument
197 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); in alchemy_clk_aux_setr()
213 static long alchemy_clk_aux_roundr(struct clk_hw *hw, in alchemy_clk_aux_roundr() argument
217 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); in alchemy_clk_aux_roundr()
259 a->hw.init = &id; in alchemy_clk_setup_aux()
[all …]
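
The alchemy driver shows the standard clk_hw embedding pattern (also visible in the vexpress spc.c and OMAP2 hits below): a struct clk_hw lives inside the driver's private clock struct, and each callback recovers the outer struct with container_of(). A self-contained sketch, with struct auxpll_clk and its fields invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-in for the common-clock framework type. */
    struct clk_hw { const void *init; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Driver-private clock with clk_hw embedded as a member, as in
     * struct alchemy_auxpll_clk and struct clk_spc. */
    struct auxpll_clk {
        struct clk_hw hw;
        unsigned long rate;
    };

    #define to_auxpll_clk(x) container_of(x, struct auxpll_clk, hw)

    /* Callbacks receive only the clk_hw pointer; recover the outer struct. */
    static unsigned long auxpll_recalc_rate(struct clk_hw *hw)
    {
        struct auxpll_clk *a = to_auxpll_clk(hw);
        return a->rate;
    }

    int main(void)
    {
        struct auxpll_clk clk = { .rate = 96000000 };
        printf("%lu\n", auxpll_recalc_rate(&clk.hw));
        return 0;
    }
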
/arch/x86/kernel/cpu/
perf_event_intel_cqm.c
299 if (a->hw.target == b->hw.target) { in __match_event()
300 b->hw.is_group_event = true; in __match_event()
317 return perf_cgroup_from_task(event->hw.target, event->ctx); in event_to_cgroup()
422 struct list_head *head = &group->hw.cqm_group_entry; in intel_cqm_xchg_rmid()
423 u32 old_rmid = group->hw.cqm_rmid; in intel_cqm_xchg_rmid()
443 group->hw.cqm_rmid = rmid; in intel_cqm_xchg_rmid()
444 list_for_each_entry(event, head, hw.cqm_group_entry) in intel_cqm_xchg_rmid()
445 event->hw.cqm_rmid = rmid; in intel_cqm_xchg_rmid()
495 hw.cqm_groups_entry); in intel_cqm_sched_in_event()
499 hw.cqm_groups_entry) { in intel_cqm_sched_in_event()
[all …]
perf_event_msr.c
137 event->hw.idx = -1; in msr_event_init()
138 event->hw.event_base = msr[cfg].msr; in msr_event_init()
139 event->hw.config = cfg; in msr_event_init()
148 if (event->hw.event_base) in msr_read_counter()
149 rdmsrl(event->hw.event_base, now); in msr_read_counter()
162 prev = local64_read(&event->hw.prev_count); in msr_event_update()
165 if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev) in msr_event_update()
169 if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) in msr_event_update()
180 local64_set(&event->hw.prev_count, now); in msr_event_start()
perf_event_amd_iommu.c
24 #define _GET_BANK(ev) ((u8)(ev->hw.extra_reg.reg >> 8))
25 #define _GET_CNTR(ev) ((u8)(ev->hw.extra_reg.reg))
28 #define _GET_CSOURCE(ev) ((ev->hw.config & 0xFFULL))
29 #define _GET_DEVID(ev) ((ev->hw.config >> 8) & 0xFFFFULL)
30 #define _GET_PASID(ev) ((ev->hw.config >> 24) & 0xFFFFULL)
31 #define _GET_DOMID(ev) ((ev->hw.config >> 40) & 0xFFFFULL)
32 #define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config) & 0xFFFFULL)
33 #define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL)
34 #define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL)
199 struct hw_perf_event *hwc = &event->hw; in perf_iommu_event_init()
[all …]
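
The AMD IOMMU macros above carve one 64-bit hw.config into packed fields: csource in bits 0-7, devid in bits 8-23, pasid in bits 24-39, domid in bits 40-55, exactly per the shifts and masks shown. A round-trip check of that layout; pack_config() is an invented inverse helper, not kernel code:

    #include <assert.h>
    #include <stdint.h>

    /* Field layout of hw.config as used by perf_event_amd_iommu.c:
     * bits  0..7   csource
     * bits  8..23  devid
     * bits 24..39  pasid
     * bits 40..55  domid */
    #define GET_CSOURCE(cfg) ((cfg) & 0xFFULL)
    #define GET_DEVID(cfg)   (((cfg) >> 8)  & 0xFFFFULL)
    #define GET_PASID(cfg)   (((cfg) >> 24) & 0xFFFFULL)
    #define GET_DOMID(cfg)   (((cfg) >> 40) & 0xFFFFULL)

    /* Invented inverse helper, only for the round-trip test below. */
    static uint64_t pack_config(uint64_t csource, uint64_t devid,
                                uint64_t pasid, uint64_t domid)
    {
        return (csource & 0xFFULL) | ((devid & 0xFFFFULL) << 8) |
               ((pasid & 0xFFFFULL) << 24) | ((domid & 0xFFFFULL) << 40);
    }

    int main(void)
    {
        uint64_t cfg = pack_config(0x2A, 0xBEEF, 0x1234, 0x00FF);

        assert(GET_CSOURCE(cfg) == 0x2A);
        assert(GET_DEVID(cfg)   == 0xBEEF);
        assert(GET_PASID(cfg)   == 0x1234);
        assert(GET_DOMID(cfg)   == 0x00FF);
        return 0;
    }
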
perf_event_intel_uncore_snb.c
76 struct hw_perf_event *hwc = &event->hw; in snb_uncore_msr_enable_event()
86 wrmsrl(event->hw.config_base, 0); in snb_uncore_msr_disable_event()
247 struct hw_perf_event *hwc = &event->hw; in snb_uncore_imc_read_counter()
261 struct hw_perf_event *hwc = &event->hw; in snb_uncore_imc_event_init()
304 event->hw.idx = -1; in snb_uncore_imc_event_init()
305 event->hw.last_tag = ~0ULL; in snb_uncore_imc_event_init()
306 event->hw.extra_reg.idx = EXTRA_REG_NONE; in snb_uncore_imc_event_init()
307 event->hw.branch_reg.idx = EXTRA_REG_NONE; in snb_uncore_imc_event_init()
325 event->hw.event_base = base; in snb_uncore_imc_event_init()
326 event->hw.config = cfg; in snb_uncore_imc_event_init()
[all …]
perf_event_intel_uncore.c
128 rdmsrl(event->hw.event_base, count); in uncore_msr_read_counter()
140 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; in uncore_get_constraint()
141 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; in uncore_get_constraint()
177 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; in uncore_put_constraint()
212 struct hw_perf_event *hwc = &event->hw; in uncore_assign_hw_event()
232 if (event->hw.idx == UNCORE_PMC_IDX_FIXED) in uncore_perf_event_update()
239 prev_count = local64_read(&event->hw.prev_count); in uncore_perf_event_update()
241 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) in uncore_perf_event_update()
397 if ((event->hw.config & c->cmask) == c->code) in uncore_get_event_constraint()
429 hwc = &box->event_list[i]->hw; in uncore_assign_events()
[all …]
/arch/arm/mach-omap2/
clkt2xxx_virt_prcm_set.c
74 long omap2_round_to_table_rate(struct clk_hw *hw, unsigned long rate, in omap2_round_to_table_rate() argument
98 int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate, in omap2_select_table_rate() argument
232 struct clk_hw_omap *hw = NULL; in omap2xxx_clkt_vps_init() local
239 hw = kzalloc(sizeof(*hw), GFP_KERNEL); in omap2xxx_clkt_vps_init()
240 if (!hw) in omap2xxx_clkt_vps_init()
247 hw->hw.init = &init; in omap2xxx_clkt_vps_init()
249 clk = clk_register(NULL, &hw->hw); in omap2xxx_clkt_vps_init()
253 kfree(hw); in omap2xxx_clkt_vps_init()
clock2xxx.h
16 int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate,
18 long omap2_round_to_table_rate(struct clk_hw *hw, unsigned long rate,
24 void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
clkt2xxx_dpllcore.c
105 unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, in omap2_dpllcore_recalc() argument
111 int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate, in omap2_reprogram_dpllcore() argument
114 struct clk_hw_omap *clk = to_clk_hw_omap(hw); in omap2_reprogram_dpllcore()
191 void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw) in omap2xxx_clkt_dpllcore_init() argument
194 dpll_core_ck = to_clk_hw_omap(hw); in omap2xxx_clkt_dpllcore_init()
/arch/sh/kernel/
perf_event.c
124 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
223 struct hw_perf_event *hwc = &event->hw; in sh_pmu_stop()
226 if (!(event->hw.state & PERF_HES_STOPPED)) { in sh_pmu_stop()
229 event->hw.state |= PERF_HES_STOPPED; in sh_pmu_stop()
232 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { in sh_pmu_stop()
233 sh_perf_event_update(event, &event->hw, idx); in sh_pmu_stop()
234 event->hw.state |= PERF_HES_UPTODATE; in sh_pmu_stop()
241 struct hw_perf_event *hwc = &event->hw; in sh_pmu_start()
248 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in sh_pmu_start()
251 event->hw.state = 0; in sh_pmu_start()
[all …]
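
The sh stop path (and the near-identical blackfin, xtensa, and arc copies below) encodes a small state machine in event->hw.state: PERF_HES_STOPPED records that the counter is disabled, PERF_HES_UPTODATE that the final count has been folded in, so each step runs at most once however often stop() is called. A compact userspace sketch of that protocol (flag values believed to match <linux/perf_event.h>; counter_disable()/counter_update() are stubs):

    #include <stdio.h>

    #define PERF_HES_STOPPED   0x01  /* counter is disabled */
    #define PERF_HES_UPTODATE  0x02  /* final count already folded in */
    #define PERF_EF_UPDATE     0x04  /* caller wants the count updated */

    struct hw_event { unsigned state; long count; };

    /* Stubs standing in for the hardware disable and count fold. */
    static void counter_disable(struct hw_event *hwc) { (void)hwc; }
    static void counter_update(struct hw_event *hwc)  { hwc->count += 42; }

    /* Mirrors the shape of sh_pmu_stop()/bfin_pmu_stop()/xtensa_pmu_stop():
     * the two flags make both halves idempotent. */
    static void pmu_stop(struct hw_event *hwc, int flags)
    {
        if (!(hwc->state & PERF_HES_STOPPED)) {
            counter_disable(hwc);
            hwc->state |= PERF_HES_STOPPED;
        }
        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
            counter_update(hwc);
            hwc->state |= PERF_HES_UPTODATE;
        }
    }

    int main(void)
    {
        struct hw_event ev = { .state = 0, .count = 0 };
        pmu_stop(&ev, PERF_EF_UPDATE);
        pmu_stop(&ev, PERF_EF_UPDATE);  /* second call: count folded once */
        printf("state=%#x count=%ld\n", ev.state, ev.count);
        return 0;
    }
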
/arch/blackfin/kernel/
perf_event.c
304 struct hw_perf_event *hwc = &event->hw; in bfin_pmu_stop()
307 if (!(event->hw.state & PERF_HES_STOPPED)) { in bfin_pmu_stop()
310 event->hw.state |= PERF_HES_STOPPED; in bfin_pmu_stop()
313 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { in bfin_pmu_stop()
314 bfin_perf_event_update(event, &event->hw, idx); in bfin_pmu_stop()
315 event->hw.state |= PERF_HES_UPTODATE; in bfin_pmu_stop()
322 struct hw_perf_event *hwc = &event->hw; in bfin_pmu_start()
329 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in bfin_pmu_start()
332 event->hw.state = 0; in bfin_pmu_start()
341 __clear_bit(event->hw.idx, cpuc->used_mask); in bfin_pmu_del()
[all …]
/arch/arm/kernel/
bios32.c
444 static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, in pcibios_init_hw() argument
451 for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { in pcibios_init_hw()
457 sys->swizzle = hw->swizzle; in pcibios_init_hw()
458 sys->map_irq = hw->map_irq; in pcibios_init_hw()
461 if (hw->private_data) in pcibios_init_hw()
462 sys->private_data = hw->private_data[nr]; in pcibios_init_hw()
464 ret = hw->setup(nr, sys); in pcibios_init_hw()
475 if (hw->scan) in pcibios_init_hw()
476 sys->bus = hw->scan(nr, sys); in pcibios_init_hw()
479 sys->busnr, hw->ops, sys, in pcibios_init_hw()
[all …]
/arch/xtensa/kernel/
perf_event.c
147 new_raw_count = xtensa_pmu_read_counter(event->hw.idx); in xtensa_perf_event_update()
210 event->hw.config = xtensa_hw_ctl[event->attr.config]; in xtensa_pmu_event_init()
217 event->hw.config = ret; in xtensa_pmu_event_init()
225 event->hw.config = (event->attr.config & in xtensa_pmu_event_init()
245 struct hw_perf_event *hwc = &event->hw; in xtensa_pmu_start()
252 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in xtensa_pmu_start()
263 struct hw_perf_event *hwc = &event->hw; in xtensa_pmu_stop()
274 !(event->hw.state & PERF_HES_UPTODATE)) { in xtensa_pmu_stop()
275 xtensa_perf_event_update(event, &event->hw, idx); in xtensa_pmu_stop()
276 event->hw.state |= PERF_HES_UPTODATE; in xtensa_pmu_stop()
[all …]
/arch/nios2/
Makefile
26 KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MUL_SUPPORT),-mhw-mul,-mno-hw-mul)
27 KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MULX_SUPPORT),-mhw-mulx,-mno-hw-mulx)
28 KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_DIV_SUPPORT),-mhw-div,-mno-hw-div)
/arch/s390/kernel/
perf_cpum_cf.c
332 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
440 prev = local64_read(&event->hw.prev_count); in hw_perf_event_reset()
441 err = ecctr(event->hw.config, &new); in hw_perf_event_reset()
452 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in hw_perf_event_reset()
463 prev = local64_read(&event->hw.prev_count); in hw_perf_event_update()
464 err = ecctr(event->hw.config, &new); in hw_perf_event_update()
467 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in hw_perf_event_update()
478 if (event->hw.state & PERF_HES_STOPPED) in cpumf_pmu_read()
487 struct hw_perf_event *hwc = &event->hw; in cpumf_pmu_start()
518 struct hw_perf_event *hwc = &event->hw; in cpumf_pmu_stop()
[all …]
perf_cpum_sf.c
641 if (RAWSAMPLE_REG(&event->hw)) in hw_perf_event_destroy()
642 kfree((void *) RAWSAMPLE_REG(&event->hw)); in hw_perf_event_destroy()
686 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
891 hwc = &cpuhw->event->hw; in cpumsf_pmu_enable()
1002 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_push_sample()
1159 unsigned long flags = SAMPL_FLAGS(&event->hw); in hw_collect_samples()
1166 sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); in hw_collect_samples()
1169 sample_size = event_sample_size(&event->hw); in hw_collect_samples()
1178 perf_event_count_update(event, SAMPL_RATE(&event->hw)); in hw_collect_samples()
1233 struct hw_perf_event *hwc = &event->hw; in hw_perf_event_update()
[all …]
/arch/arc/kernel/
perf_event.c
121 arc_perf_event_update(event, &event->hw, event->hw.idx); in arc_pmu_read()
154 struct hw_perf_event *hwc = &event->hw; in arc_pmu_event_init()
216 struct hw_perf_event *hwc = &event->hw; in arc_pmu_event_set_period()
262 struct hw_perf_event *hwc = &event->hw; in arc_pmu_start()
287 struct hw_perf_event *hwc = &event->hw; in arc_pmu_stop()
301 if (!(event->hw.state & PERF_HES_STOPPED)) { in arc_pmu_stop()
308 event->hw.state |= PERF_HES_STOPPED; in arc_pmu_stop()
312 !(event->hw.state & PERF_HES_UPTODATE)) { in arc_pmu_stop()
313 arc_perf_event_update(event, &event->hw, idx); in arc_pmu_stop()
314 event->hw.state |= PERF_HES_UPTODATE; in arc_pmu_stop()
[all …]
/arch/c6x/kernel/
irq.c
78 irq_hw_number_t hw) in core_domain_map() argument
80 if (hw < 4 || hw >= NR_PRIORITY_IRQS) in core_domain_map()
83 prio_to_virq[hw] = virq; in core_domain_map()
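
core_domain_map() above is the usual irqdomain map hook: validate the hardware irq number (values below 4 are rejected here), then record the hw-to-virq translation. A toy sketch of that shape; the bound check and prio_to_virq mirror the hit, but NR_PRIORITY_IRQS's value and the surrounding program are invented:

    #include <errno.h>
    #include <stdio.h>

    #define NR_PRIORITY_IRQS 16  /* illustrative bound */

    static int prio_to_virq[NR_PRIORITY_IRQS];

    /* Shape of core_domain_map(): reject hw numbers outside the usable
     * range, then record the hw -> virq translation. */
    static int domain_map(unsigned int virq, unsigned long hw)
    {
        if (hw < 4 || hw >= NR_PRIORITY_IRQS)
            return -EINVAL;

        prio_to_virq[hw] = virq;
        return 0;
    }

    int main(void)
    {
        printf("map hw 5 -> %d\n", domain_map(100, 5));  /* 0: mapped */
        printf("map hw 2 -> %d\n", domain_map(101, 2));  /* -EINVAL: reserved */
        return 0;
    }
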
/arch/alpha/kernel/
perf_event.c
198 event[0]->hw.idx = idx0; in ev67_check_constraints()
199 event[0]->hw.config_base = config; in ev67_check_constraints()
201 event[1]->hw.idx = idx0 ^ 1; in ev67_check_constraints()
202 event[1]->hw.config_base = config; in ev67_check_constraints()
350 evtype[n] = group->hw.event_base; in collect_events()
358 evtype[n] = pe->hw.event_base; in collect_events()
402 cpuc->current_idx[j] != pe->hw.idx) { in maybe_change_configuration()
403 alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); in maybe_change_configuration()
412 struct hw_perf_event *hwc = &pe->hw; in maybe_change_configuration()
423 cpuc->config = cpuc->event[0]->hw.config_base; in maybe_change_configuration()
[all …]
/arch/tile/kernel/
perf_event.c
412 struct hw_perf_event *hwc = &event->hw; in tile_pmu_enable_event()
461 struct hw_perf_event *hwc = &event->hw; in tile_pmu_disable_event()
504 struct hw_perf_event *hwc = &event->hw; in tile_perf_event_update()
550 struct hw_perf_event *hwc = &event->hw; in tile_event_set_period()
594 struct hw_perf_event *hwc = &event->hw; in tile_pmu_stop()
620 int idx = event->hw.idx; in tile_pmu_start()
622 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in tile_pmu_start()
629 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in tile_pmu_start()
633 event->hw.state = 0; in tile_pmu_start()
658 hwc = &event->hw; in tile_pmu_add()
[all …]
/arch/arm/mach-vexpress/
spc.c
493 struct clk_hw hw; member
497 #define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
498 static unsigned long spc_recalc_rate(struct clk_hw *hw, in spc_recalc_rate() argument
501 struct clk_spc *spc = to_clk_spc(hw); in spc_recalc_rate()
510 static long spc_round_rate(struct clk_hw *hw, unsigned long drate, in spc_round_rate() argument
513 struct clk_spc *spc = to_clk_spc(hw); in spc_round_rate()
518 static int spc_set_rate(struct clk_hw *hw, unsigned long rate, in spc_set_rate() argument
521 struct clk_spc *spc = to_clk_spc(hw); in spc_set_rate()
543 spc->hw.init = &init; in ve_spc_clk_register()
553 return devm_clk_register(cpu_dev, &spc->hw); in ve_spc_clk_register()
/arch/sparc/kernel/
perf_event.c
929 cpuc->current_idx[i] != cp->hw.idx) { in read_in_all_counters()
930 sparc_perf_event_update(cp, &cp->hw, in read_in_all_counters()
933 if (cp->hw.state & PERF_HES_STOPPED) in read_in_all_counters()
934 cp->hw.state |= PERF_HES_ARCH; in read_in_all_counters()
955 struct hw_perf_event *hwc = &cp->hw; in calculate_single_pcr()
975 cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; in calculate_single_pcr()
990 struct hw_perf_event *hwc = &cp->hw; in calculate_multiple_pcrs()
998 if (cp->hw.state & PERF_HES_ARCH) in calculate_multiple_pcrs()
1006 int idx = cp->hw.idx; in calculate_multiple_pcrs()
1008 cpuc->pcr[idx] |= cp->hw.config_base; in calculate_multiple_pcrs()
[all …]
/arch/arm/mach-footbridge/
Makefile
15 obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o
17 obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o
