Searched refs:counts (Results 1 – 5 of 5) sorted by relevance
241 struct msi_counts *counts = data;  in count_non_bridge_devices() local
251 counts->num_devices++;  in count_non_bridge_devices()
258 struct msi_counts *counts = data;  in count_spare_msis() local
262 if (dn == counts->requestor)  in count_spare_msis()
263 req = counts->request;  in count_spare_msis()
277 if (req < counts->quota)  in count_spare_msis()
278 counts->spare += counts->quota - req;  in count_spare_msis()
279 else if (req > counts->quota)  in count_spare_msis()
280 counts->over_quota++;  in count_spare_msis()
288 struct msi_counts counts;  in msi_quota_for_device() local
[all …]
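The hits above outline a quota/spare accounting pattern: each device's MSI request is compared against a per-device quota, under-quota devices donate the difference to a spare pool, and over-quota devices are tallied so the spares can be redistributed later. Below is a minimal standalone sketch of that pattern, not the kernel code: the device-tree traversal is replaced by a plain array walk, the request values are hypothetical, and the struct layout is inferred from the fields visible in the results.

#include <stdio.h>

struct msi_counts {
	int num_devices;   /* devices seen during the walk */
	int quota;         /* per-device MSI quota */
	int spare;         /* MSIs donated by under-quota devices */
	int over_quota;    /* devices requesting more than quota */
};

static void count_spare_msis(int req, struct msi_counts *counts)
{
	counts->num_devices++;
	if (req < counts->quota)
		counts->spare += counts->quota - req;
	else if (req > counts->quota)
		counts->over_quota++;
}

int main(void)
{
	int requests[] = { 1, 4, 8, 2 };  /* hypothetical per-device requests */
	struct msi_counts counts = { .quota = 4 };

	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
		count_spare_msis(requests[i], &counts);

	/* prints: devices=4 spare=5 over_quota=1 */
	printf("devices=%d spare=%d over_quota=%d\n",
	       counts.num_devices, counts.spare, counts.over_quota);
	return 0;
}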
258 unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);  in smp_flush_tlb_cpumask() local
266 counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;  in smp_flush_tlb_cpumask()
280 while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))  in smp_flush_tlb_cpumask()
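These lines show a shadow-counter wait: snapshot a target CPU's flush counter, request a flush, then spin until the live counter differs from the snapshot, which proves the remote flush happened after the request. A self-contained illustration using C11 atomics follows, with one worker thread standing in for the remote CPU; the IPI machinery and per-cpu accessors are elided, and all names here are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_ushort flush_count;   /* stands in for local_tlb_flush_counts[cpu] */
static atomic_bool flush_requested;

static void *remote_cpu(void *arg)
{
	(void)arg;
	while (!atomic_load(&flush_requested))
		;                               /* wait for the "IPI" */
	/* perform the flush, then bump the counter to publish completion */
	atomic_fetch_add(&flush_count, 1);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, remote_cpu, NULL);

	unsigned short shadow = atomic_load(&flush_count) & 0xffff;  /* snapshot */
	atomic_store(&flush_requested, 1);                           /* send request */

	while (shadow == (atomic_load(&flush_count) & 0xffff))
		;                               /* spin until the remote flush lands */

	pthread_join(t, NULL);
	printf("remote flush observed\n");
	return 0;
}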
1251 short counts[MAX_PEBS_EVENTS] = {};  in intel_pmu_drain_pebs_nhm() local
1274 counts[bit]++;  in intel_pmu_drain_pebs_nhm()
1312 counts[bit]++;  in intel_pmu_drain_pebs_nhm()
1316 if ((counts[bit] == 0) && (error[bit] == 0))  in intel_pmu_drain_pebs_nhm()
1327 if (counts[bit]) {  in intel_pmu_drain_pebs_nhm()
1329 top, bit, counts[bit]);  in intel_pmu_drain_pebs_nhm()
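The pattern here is a two-pass drain: a scan over buffered PEBS records increments counts[bit] for each record attributed to event "bit", and a second pass consumes only the bits that actually saw records or logged a decode error. A reduced stand-in follows: the records are a plain array of event bits, the simulated decode error is invented for the example, and event delivery is reduced to a printf.

#include <stdio.h>

#define MAX_PEBS_EVENTS 8

int main(void)
{
	int records[] = { 0, 2, 2, 5, 2 };  /* hypothetical drained records, one event bit each */
	short counts[MAX_PEBS_EVENTS] = {0};
	short error[MAX_PEBS_EVENTS] = {0};

	/* first pass: attribute each record to its event counter */
	for (size_t i = 0; i < sizeof(records) / sizeof(records[0]); i++) {
		int bit = records[i];
		if (bit >= 0 && bit < MAX_PEBS_EVENTS)
			counts[bit]++;
	}
	error[6] = 1;  /* pretend one record for event 6 failed to decode */

	/* second pass: skip events with neither records nor decode errors */
	for (int bit = 0; bit < MAX_PEBS_EVENTS; bit++) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;
		if (counts[bit])
			printf("event %d: %d record(s)\n", bit, counts[bit]);
		else
			printf("event %d: %d decode error(s)\n", bit, error[bit]);
	}
	return 0;
}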
1252 u8 *counts = vphn_cpu_change_counts[cpu];  in setup_cpu_associativity_change_counters() local
1256 counts[i] = hypervisor_counts[i];  in setup_cpu_associativity_change_counters()
1278 u8 *counts = vphn_cpu_change_counts[cpu];  in update_cpu_associativity_changes_mask() local
1282 if (hypervisor_counts[i] != counts[i]) {  in update_cpu_associativity_changes_mask()
1283 counts[i] = hypervisor_counts[i];  in update_cpu_associativity_changes_mask()
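These two functions implement change detection by counter comparison: setup copies the hypervisor's associativity change counters into a local shadow, and each update pass compares fresh counters against the shadow, resyncing it and flagging any entry that moved. A standalone sketch under simplifying assumptions: the hypervisor counters are a plain array updated by hand, and the per-cpu bookkeeping is collapsed to a single CPU.

#include <stdio.h>

#define NR_COUNTERS 8

static unsigned char counts[NR_COUNTERS];             /* local shadow */
static unsigned char hypervisor_counts[NR_COUNTERS];  /* "live" values */

static void setup_change_counters(void)
{
	for (int i = 0; i < NR_COUNTERS; i++)
		counts[i] = hypervisor_counts[i];
}

/* returns nonzero if any counter moved since the last call */
static int update_changes_mask(void)
{
	int changed = 0;

	for (int i = 0; i < NR_COUNTERS; i++) {
		if (hypervisor_counts[i] != counts[i]) {
			counts[i] = hypervisor_counts[i];  /* resync the shadow */
			changed = 1;
		}
	}
	return changed;
}

int main(void)
{
	setup_change_counters();
	printf("changed: %d\n", update_changes_mask());  /* 0: nothing moved */

	hypervisor_counts[3]++;                          /* simulate a topology change */
	printf("changed: %d\n", update_changes_mask());  /* 1: counter 3 moved */
	return 0;
}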
196 ! cycle counts for different sizes using byte-at-a-time vs. optimised):
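This fragment is the header of a comment (the "!" marker suggests assembly source) tabulating cycle counts for a byte-at-a-time copy against an optimised variant. As a rough C sketch of the two strategies such a table would compare: the byte loop issues one load/store per byte, while the word loop issues one per machine word. Everything here is illustrative; real routines also handle unaligned heads and tails, which the wide path below avoids by forcing aligned buffers.

#include <stdio.h>
#include <string.h>
#include <stdalign.h>

/* one load/store per byte: simple, but one memory trip per byte */
static void copy_bytewise(unsigned char *dst, const unsigned char *src, size_t n)
{
	while (n--)
		*dst++ = *src++;
}

/* one load/store per word: roughly 1/sizeof(long) as many memory operations;
 * assumes dst and src are long-aligned */
static void copy_wordwise(unsigned char *dst, const unsigned char *src, size_t n)
{
	long *d = (long *)dst;
	const long *s = (const long *)src;

	for (; n >= sizeof(long); n -= sizeof(long))
		*d++ = *s++;
	copy_bytewise((unsigned char *)d, (const unsigned char *)s, n);  /* tail */
}

int main(void)
{
	alignas(long) unsigned char src[32], a[32], b[32];

	for (int i = 0; i < 32; i++)
		src[i] = (unsigned char)i;
	copy_bytewise(a, src, sizeof(src));
	copy_wordwise(b, src, sizeof(src));
	printf("results match: %d\n", memcmp(a, b, sizeof(a)) == 0);
	return 0;
}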