
Searched refs:events (Results 1 – 25 of 95) sorted by relevance


/arch/um/os-Linux/
irq.c
39 int os_epoll_triggered(int index, int events) in os_epoll_triggered() argument
41 return epoll_events[index].events & events; in os_epoll_triggered()
92 int os_add_epoll_fd(int events, int fd, void *data) in os_add_epoll_fd() argument
98 event.events = events | EPOLLET; in os_add_epoll_fd()
101 result = os_mod_epoll_fd(events, fd, data); in os_add_epoll_fd()
110 int os_mod_epoll_fd(int events, int fd, void *data) in os_mod_epoll_fd() argument
116 event.events = events; in os_mod_epoll_fd()
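The os_add_epoll_fd()/os_mod_epoll_fd() pair above wraps epoll_ctl(2), and the snippet at line 101 suggests the add path falls back to a modify when the fd is already registered, with EPOLLET putting the fd into edge-triggered mode. A minimal userspace sketch of that add-or-modify pattern (the helper name epoll_add_or_mod is illustrative, not the UML function):

#include <errno.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

/* Add fd to the epoll set; if it is already present, modify it
 * instead -- mirroring the fallback seen in os_add_epoll_fd(). */
static int epoll_add_or_mod(int epfd, int events, int fd, void *data)
{
	struct epoll_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.events = events | EPOLLET;	/* edge-triggered, as in the UML code */
	ev.data.ptr = data;

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == 0)
		return 0;
	if (errno != EEXIST)
		return -errno;
	/* Already registered: update the event mask instead. */
	return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) == 0 ? 0 : -errno;
}

int main(void)
{
	int epfd = epoll_create1(0);

	if (epfd < 0)
		return 1;
	/* Registering twice exercises the EEXIST fallback path. */
	epoll_add_or_mod(epfd, EPOLLIN, STDIN_FILENO, NULL);
	epoll_add_or_mod(epfd, EPOLLIN | EPOLLOUT, STDIN_FILENO, NULL);
	close(epfd);
	return 0;
}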
/arch/m68k/mac/
via.c
385 unsigned char irq_bit, events; in via1_irq() local
387 events = via1[vIFR] & via1[vIER] & 0x7F; in via1_irq()
388 if (!events) in via1_irq()
393 if (events & irq_bit) { in via1_irq()
401 events &= ~irq_bit; in via1_irq()
402 if (!events) in via1_irq()
409 if (events & irq_bit) { in via1_irq()
415 } while (events >= irq_bit); in via1_irq()
421 unsigned char irq_bit, events; in via2_irq() local
423 events = via2[gIFR] & via2[gIER] & 0x7F; in via2_irq()
[all …]
baboon.c
46 short events, irq_bit; in baboon_irq() local
49 events = baboon->mb_ifr & 0x07; in baboon_irq()
53 if (events & irq_bit) { in baboon_irq()
54 events &= ~irq_bit; in baboon_irq()
59 } while (events); in baboon_irq()
psc.c
122 unsigned char irq_bit, events; in psc_irq() local
124 events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF; in psc_irq()
125 if (!events) in psc_irq()
131 if (events & irq_bit) { in psc_irq()
137 } while (events >= irq_bit); in psc_irq()
oss.c
72 u16 events, irq_bit; in oss_nubus_irq() local
75 events = oss->irq_pending & OSS_IP_NUBUS; in oss_nubus_irq()
79 if (events & irq_bit) { in oss_nubus_irq()
80 events &= ~irq_bit; in oss_nubus_irq()
85 } while (events); in oss_nubus_irq()
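All four Mac interrupt controllers above (VIA, Baboon, PSC, OSS) share one dispatch idiom: AND the pending register against the enabled mask, then walk the result bit by bit, clearing each bit as its handler runs, until nothing is left pending. A standalone sketch of that loop (register values and the dispatch function are made up for illustration; the real code reads MMIO registers):

#include <stdio.h>

/* Stand-ins for the controller's pending (IFR) and enable (IER)
 * registers; via1_irq() reads these from hardware. */
static unsigned char fake_ifr = 0x15;
static unsigned char fake_ier = 0x7F;

static void dispatch(int irq)
{
	printf("handling irq bit %d\n", irq);
}

static void mask_and_dispatch(void)
{
	unsigned char events = fake_ifr & fake_ier & 0x7F;
	unsigned char irq_bit = 1;
	int irq = 0;

	if (!events)
		return;
	do {
		if (events & irq_bit) {
			events &= ~irq_bit;	/* clear before moving on */
			dispatch(irq);
		}
		irq_bit <<= 1;
		irq++;
	} while (events);		/* stop once every pending bit is served */
}

int main(void)
{
	mask_and_dispatch();
	return 0;
}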
/arch/x86/events/
Kconfig
5 tristate "Intel uncore performance events"
9 Include support for Intel uncore performance events. These are
13 tristate "Intel/AMD rapl performance events"
17 Include support for Intel and AMD rapl performance events for power
21 tristate "Intel cstate performance events"
25 Include support for Intel cstate performance events for power
38 tristate "AMD Uncore performance events"
42 Include support for AMD uncore performance events for use with
msr.c
128 PMU_EVENT_GROUP(events, aperf);
129 PMU_EVENT_GROUP(events, mperf);
130 PMU_EVENT_GROUP(events, pperf);
131 PMU_EVENT_GROUP(events, smi);
132 PMU_EVENT_GROUP(events, ptsc);
133 PMU_EVENT_GROUP(events, irperf);
rapl.c
129 unsigned long events; member
709 .events = BIT(PERF_RAPL_PP0) |
717 .events = BIT(PERF_RAPL_PP0) |
725 .events = BIT(PERF_RAPL_PP0) |
734 .events = BIT(PERF_RAPL_PP0) |
743 .events = BIT(PERF_RAPL_PKG) |
751 .events = BIT(PERF_RAPL_PP0) |
761 .events = BIT(PERF_RAPL_PP0) |
771 .events = BIT(PERF_RAPL_PKG),
831 false, (void *) &rm->events); in rapl_pmu_init()
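rapl.c describes each CPU model as a bitmap of supported RAPL domains built from BIT() of per-domain enum values, and init code tests membership before registering a counter. A compact sketch of that table-of-bitmaps idea (the enum and model names here are illustrative, echoing PERF_RAPL_PP0 / PERF_RAPL_PKG rather than reproducing them):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Illustrative domain ids standing in for the PERF_RAPL_* enum. */
enum rapl_domain { DOM_PP0, DOM_PKG, DOM_RAM, DOM_MAX };

struct rapl_model {
	const char *name;
	unsigned long events;	/* bitmap of supported domains */
};

static const struct rapl_model models[] = {
	{ "client", BIT(DOM_PP0) | BIT(DOM_PKG) },
	{ "server", BIT(DOM_PKG) | BIT(DOM_RAM) },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
		for (int d = 0; d < DOM_MAX; d++) {
			if (models[i].events & BIT(d))
				printf("%s: domain %d supported\n",
				       models[i].name, d);
		}
	}
	return 0;
}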
/arch/arm/kernel/
perf_event_xscale.c
174 struct perf_event *event = cpuc->events[idx]; in xscale1pmu_handle_irq()
209 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in xscale1pmu_enable_event() local
232 raw_spin_lock_irqsave(&events->pmu_lock, flags); in xscale1pmu_enable_event()
237 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in xscale1pmu_enable_event()
245 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in xscale1pmu_disable_event() local
266 raw_spin_lock_irqsave(&events->pmu_lock, flags); in xscale1pmu_disable_event()
271 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in xscale1pmu_disable_event()
304 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in xscale1pmu_start() local
306 raw_spin_lock_irqsave(&events->pmu_lock, flags); in xscale1pmu_start()
310 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in xscale1pmu_start()
[all …]
perf_event_v6.c
274 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in armv6pmu_enable_event() local
297 raw_spin_lock_irqsave(&events->pmu_lock, flags); in armv6pmu_enable_event()
302 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in armv6pmu_enable_event()
327 struct perf_event *event = cpuc->events[idx]; in armv6pmu_handle_irq()
366 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in armv6pmu_start() local
368 raw_spin_lock_irqsave(&events->pmu_lock, flags); in armv6pmu_start()
372 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in armv6pmu_start()
378 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in armv6pmu_stop() local
380 raw_spin_lock_irqsave(&events->pmu_lock, flags); in armv6pmu_stop()
384 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in armv6pmu_stop()
[all …]
perf_event_v7.c
876 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in armv7pmu_enable_event() local
889 raw_spin_lock_irqsave(&events->pmu_lock, flags); in armv7pmu_enable_event()
914 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in armv7pmu_enable_event()
922 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in armv7pmu_disable_event() local
934 raw_spin_lock_irqsave(&events->pmu_lock, flags); in armv7pmu_disable_event()
946 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in armv7pmu_disable_event()
974 struct perf_event *event = cpuc->events[idx]; in armv7pmu_handle_irq()
1013 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); in armv7pmu_start() local
1015 raw_spin_lock_irqsave(&events->pmu_lock, flags); in armv7pmu_start()
1018 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in armv7pmu_start()
[all …]
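The xscale, v6, and v7 handlers all bracket counter programming with raw_spin_lock_irqsave() on the per-CPU events->pmu_lock, so enable, disable, start, and stop cannot interleave their read-modify-write of the PMU control state. A rough userspace analogue of that discipline, with a pthread spinlock standing in for the kernel's raw spinlock and a plain variable for the control register (a sketch of the locking pattern only, not the ARM PMU code):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the PMU control register and its guarding lock. */
static unsigned int pmu_ctrl;
static pthread_spinlock_t pmu_lock;

/* Read-modify-write under the lock, like armv7pmu_enable_event(). */
static void enable_counter(int idx)
{
	pthread_spin_lock(&pmu_lock);
	pmu_ctrl |= 1u << idx;
	pthread_spin_unlock(&pmu_lock);
}

static void disable_counter(int idx)
{
	pthread_spin_lock(&pmu_lock);
	pmu_ctrl &= ~(1u << idx);
	pthread_spin_unlock(&pmu_lock);
}

int main(void)
{
	pthread_spin_init(&pmu_lock, PTHREAD_PROCESS_PRIVATE);
	enable_counter(2);
	disable_counter(2);
	printf("ctrl = %#x\n", pmu_ctrl);
	pthread_spin_destroy(&pmu_lock);
	return 0;
}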
/arch/arm/boot/dts/
exynos4412-ppmu-common.dtsi
12 events {
22 events {
32 events {
42 events {
/arch/um/kernel/
irq.c
37 int events; member
133 if (!reg->events) in sigio_reg_handler()
136 if (os_epoll_triggered(idx, reg->events) <= 0) in sigio_reg_handler()
231 int events = 0; in update_irq_entry() local
234 events |= entry->reg[i].events; in update_irq_entry()
236 if (events) { in update_irq_entry()
238 os_add_epoll_fd(events, entry->fd, entry); in update_irq_entry()
257 int err, events = os_event_mask(type); in activate_fd() local
268 if (WARN_ON(irq_entry->reg[type].events)) { in activate_fd()
289 irq_entry->reg[type].events = events; in activate_fd()
[all …]
/arch/um/drivers/
ubd_user.c
40 kernel_pollfd.events = POLLIN; in start_io_thread()
70 kernel_pollfd.events = POLLIN; in ubd_read_poll()
75 kernel_pollfd.events = POLLOUT; in ubd_write_poll()
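ubd_user.c reuses one struct pollfd, flipping .events between POLLIN and POLLOUT depending on whether it is waiting to read or to write. The same idiom in a runnable form:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;

	pfd.fd = STDIN_FILENO;
	pfd.events = POLLIN;	/* wait for readability, as in ubd_read_poll() */
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
		puts("stdin readable");

	pfd.fd = STDOUT_FILENO;
	pfd.events = POLLOUT;	/* now writability, as in ubd_write_poll() */
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLOUT))
		puts("stdout writable");
	return 0;
}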
/arch/powerpc/platforms/powernv/
opal-irqchip.c
43 __be64 events = 0; in opal_handle_events() local
63 if (opal_poll_events(&events) != OPAL_SUCCESS) in opal_handle_events()
65 e = be64_to_cpu(events) & opal_event_irqchip.mask; in opal_handle_events()
124 __be64 events; in opal_interrupt() local
126 opal_handle_interrupt(virq_to_hw(irq), &events); in opal_interrupt()
127 last_outstanding_events = be64_to_cpu(events); in opal_interrupt()
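OPAL hands the event word back as a big-endian 64-bit value (__be64), which the kernel converts with be64_to_cpu() before applying the irqchip mask. The userspace equivalent uses be64toh() from glibc's <endian.h>; the wire value and mask below are illustrative:

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Pretend firmware wrote this big-endian word into our buffer. */
	uint64_t wire = htobe64(0x0000000000000005ULL);
	uint64_t mask = 0x7;			/* illustrative event mask */
	uint64_t events = be64toh(wire) & mask;	/* like be64_to_cpu(events) & ...mask */

	printf("pending events: 0x%" PRIx64 "\n", events);
	return 0;
}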
/arch/arm/mm/
cache-l2x0-pmu.c
36 static struct perf_event *events[PMU_NR_COUNTERS]; variable
44 if (!events[i]) in l2x0_pmu_find_idx()
57 if (events[i]) in l2x0_pmu_num_active_counters()
160 struct perf_event *event = events[i]; in l2x0_pmu_poll()
246 events[idx] = event; in l2x0_pmu_event_add()
265 events[hw->idx] = NULL; in l2x0_pmu_event_del()
451 if (events[i]) in l2x0_pmu_suspend()
452 l2x0_pmu_event_stop(events[i], PERF_EF_UPDATE); in l2x0_pmu_suspend()
467 if (events[i]) in l2x0_pmu_resume()
468 l2x0_pmu_event_start(events[i], PERF_EF_RELOAD); in l2x0_pmu_resume()
/arch/arm64/kvm/
pmu.c
143 static void kvm_vcpu_pmu_enable_el0(unsigned long events) in kvm_vcpu_pmu_enable_el0() argument
148 for_each_set_bit(counter, &events, 32) { in kvm_vcpu_pmu_enable_el0()
158 static void kvm_vcpu_pmu_disable_el0(unsigned long events) in kvm_vcpu_pmu_disable_el0() argument
163 for_each_set_bit(counter, &events, 32) { in kvm_vcpu_pmu_disable_el0()
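kvm_vcpu_pmu_enable_el0() iterates the events bitmap with the kernel's for_each_set_bit() helper, visiting only the counters whose bit is set. Without that helper, the same walk looks like this (a sketch of the iteration, not the kvm code):

#include <stdio.h>

int main(void)
{
	unsigned long events = 0x29;	/* counters 0, 3 and 5 set */

	/* Equivalent of for_each_set_bit(counter, &events, 32). */
	for (unsigned int counter = 0; counter < 32; counter++) {
		if (events & (1UL << counter))
			printf("enable counter %u\n", counter);
	}
	return 0;
}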
guest.c
806 struct kvm_vcpu_events *events) in __kvm_arm_vcpu_get_events() argument
808 events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE); in __kvm_arm_vcpu_get_events()
809 events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN); in __kvm_arm_vcpu_get_events()
811 if (events->exception.serror_pending && events->exception.serror_has_esr) in __kvm_arm_vcpu_get_events()
812 events->exception.serror_esr = vcpu_get_vsesr(vcpu); in __kvm_arm_vcpu_get_events()
824 struct kvm_vcpu_events *events) in __kvm_arm_vcpu_set_events() argument
826 bool serror_pending = events->exception.serror_pending; in __kvm_arm_vcpu_set_events()
827 bool has_esr = events->exception.serror_has_esr; in __kvm_arm_vcpu_set_events()
828 bool ext_dabt_pending = events->exception.ext_dabt_pending; in __kvm_arm_vcpu_set_events()
834 if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK)) in __kvm_arm_vcpu_set_events()
[all …]
arm.c
1415 struct kvm_vcpu_events *events) in kvm_arm_vcpu_get_events() argument
1417 memset(events, 0, sizeof(*events)); in kvm_arm_vcpu_get_events()
1419 return __kvm_arm_vcpu_get_events(vcpu, events); in kvm_arm_vcpu_get_events()
1423 struct kvm_vcpu_events *events) in kvm_arm_vcpu_set_events() argument
1428 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1429 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1433 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1434 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1437 return __kvm_arm_vcpu_set_events(vcpu, events); in kvm_arm_vcpu_set_events()
1533 struct kvm_vcpu_events events; in kvm_arch_vcpu_ioctl() local
[all …]
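kvm_arm_vcpu_set_events() rejects any kvm_vcpu_events whose reserved[] or exception.pad[] entries are nonzero, so those fields can later be given meaning without silently breaking old userspace. A distilled version of that forward-compatibility check (struct layout simplified for illustration; the real struct lives in the arm64 UAPI headers):

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Simplified stand-in for struct kvm_vcpu_events. */
struct vcpu_events {
	struct {
		unsigned char serror_pending;
		unsigned char pad[6];
	} exception;
	unsigned int reserved[12];
};

static int set_events(const struct vcpu_events *ev)
{
	/* Reject nonzero reserved/pad bytes, as the kvm code does. */
	for (size_t i = 0; i < ARRAY_SIZE(ev->reserved); i++)
		if (ev->reserved[i])
			return -1;
	for (size_t i = 0; i < ARRAY_SIZE(ev->exception.pad); i++)
		if (ev->exception.pad[i])
			return -1;
	return 0;
}

int main(void)
{
	struct vcpu_events ev;

	memset(&ev, 0, sizeof(ev));
	printf("zeroed: %d\n", set_events(&ev));	/* accepted: 0 */
	ev.reserved[3] = 1;
	printf("dirty:  %d\n", set_events(&ev));	/* rejected: -1 */
	return 0;
}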
/arch/x86/events/intel/
cstate.c
164 PMU_EVENT_GROUP(events, cstate_core_c1);
165 PMU_EVENT_GROUP(events, cstate_core_c3);
166 PMU_EVENT_GROUP(events, cstate_core_c6);
167 PMU_EVENT_GROUP(events, cstate_core_c7);
251 PMU_EVENT_GROUP(events, cstate_pkg_c2);
252 PMU_EVENT_GROUP(events, cstate_pkg_c3);
253 PMU_EVENT_GROUP(events, cstate_pkg_c6);
254 PMU_EVENT_GROUP(events, cstate_pkg_c7);
255 PMU_EVENT_GROUP(events, cstate_pkg_c8);
256 PMU_EVENT_GROUP(events, cstate_pkg_c9);
[all …]
/arch/nds32/kernel/
perf_event_cpu.c
251 struct perf_event *event = cpuc->events[idx]; in nds32_pmu_handle_irq()
418 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); in nds32_pmu_enable_event() local
430 raw_spin_lock_irqsave(&events->pmu_lock, flags); in nds32_pmu_enable_event()
461 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in nds32_pmu_enable_event()
469 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); in nds32_pmu_disable_event() local
480 raw_spin_lock_irqsave(&events->pmu_lock, flags); in nds32_pmu_disable_event()
492 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in nds32_pmu_disable_event()
599 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); in nds32_pmu_start() local
601 raw_spin_lock_irqsave(&events->pmu_lock, flags); in nds32_pmu_start()
609 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); in nds32_pmu_start()
[all …]
/arch/powerpc/perf/
imc-pmu.c
200 static void imc_free_events(struct imc_events *events, int nr_entries) in imc_free_events() argument
205 if (!events) in imc_free_events()
208 kfree(events[i].unit); in imc_free_events()
209 kfree(events[i].scale); in imc_free_events()
210 kfree(events[i].name); in imc_free_events()
213 kfree(events); in imc_free_events()
257 pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); in update_events_in_group()
258 if (!pmu->events) in update_events_in_group()
264 ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]); in update_events_in_group()
272 imc_free_events(pmu->events, ct); in update_events_in_group()
[all …]
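imc_free_events() frees only the nr_entries slots that were actually populated, then the array itself; update_events_in_group() calls it with the running count ct when parsing fails midway through the kcalloc'd array. The same unwind-on-partial-failure pattern in plain C (names and fields are illustrative, not the imc structures):

#include <stdlib.h>
#include <string.h>

struct event_desc {
	char *name;
	char *unit;
	char *scale;
};

/* Free the first nr_entries populated slots, then the array itself,
 * mirroring imc_free_events(). */
static void free_events(struct event_desc *events, int nr_entries)
{
	if (!events)
		return;
	for (int i = 0; i < nr_entries; i++) {
		free(events[i].name);
		free(events[i].unit);
		free(events[i].scale);
	}
	free(events);
}

int main(void)
{
	int count = 8, filled = 0;
	struct event_desc *events = calloc(count, sizeof(*events));

	if (!events)
		return 1;
	for (; filled < count; filled++) {
		events[filled].name = strdup("event");
		if (!events[filled].name)
			break;	/* parse/alloc failure: unwind what we have */
	}
	free_events(events, filled);
	return 0;
}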
/arch/powerpc/platforms/pseries/
Kconfig
48 SPLPAR machines can log hypervisor preempt & dispatch events to a
49 kernel buffer. Saying Y here will enable logging these events,
75 to return information about hardware error and non-error events
76 which may need OS attention. RTAS returns events for multiple
78 to receive events.
128 bool "Hypervisor supplied PMU events (24x7 & GPCI)"
/arch/sparc/kernel/
perf_event.c
98 unsigned long events[MAX_HWEVENTS]; member
835 enc = perf_event_get_enc(cpuc->events[idx]); in sparc_pmu_enable_event()
967 enc = perf_event_get_enc(cpuc->events[i]); in calculate_single_pcr()
1134 cpuc->events[i - 1] = cpuc->events[i]; in sparc_pmu_del()
1243 unsigned long *events, int n_ev) in sparc_check_constraints() argument
1265 msk0 = perf_event_get_msk(events[0]); in sparc_check_constraints()
1272 msk1 = perf_event_get_msk(events[1]); in sparc_check_constraints()
1346 struct perf_event *evts[], unsigned long *events, in collect_events() argument
1356 events[n] = group->hw.event_base; in collect_events()
1365 events[n] = event->hw.event_base; in collect_events()
[all …]
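sparc_pmu_del() keeps cpuc->events densely packed: when an event is removed, every later entry shifts down one slot (cpuc->events[i - 1] = cpuc->events[i]). A minimal sketch of that compaction:

#include <stdio.h>

#define MAX_EVENTS 4

int main(void)
{
	unsigned long events[MAX_EVENTS] = { 10, 20, 30, 40 };
	int n_events = 4, victim = 1;	/* remove events[1] */

	/* Shift the tail down one slot, as sparc_pmu_del() does. */
	for (int i = victim + 1; i < n_events; i++)
		events[i - 1] = events[i];
	n_events--;

	for (int i = 0; i < n_events; i++)
		printf("%lu ", events[i]);
	printf("\n");
	return 0;
}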
/arch/arm64/kernel/
trace-events-emulation.h
35 #define TRACE_INCLUDE_FILE trace-events-emulation
