Lines Matching +full:smmu +full:- +full:v2

1 // SPDX-License-Identifier: GPL-2.0
9 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
15 * filter_enable - 0 = no filtering, 1 = filtering enabled
16 * filter_span - 0 = exact match, 1 = pattern match
17 * filter_stream_id - pattern to filter against
19 * To match a partial StreamID where the X most-significant bits must match
20 * but the Y least-significant bits might differ, STREAMID is programmed
22 * STREAMID[Y - 1] == 0.
23 * STREAMID[Y - 2:0] == 1 (where Y > 1).
27 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
28 * filter_span=1,filter_stream_id=0x42/ -a netperf
31 * information is available in the SMMU documentation.
33 * SMMU events are not attributable to a CPU, so task mode and sampling
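The pattern rules quoted above can be worked through numerically. Below is a minimal standalone sketch (the helper span_bits() is hypothetical, not part of the driver) which, assuming those rules, derives how many low-order StreamID bits a filter_span=1 pattern treats as don't-care and which StreamIDs it therefore covers; for the pattern 0x42 from the example it reports 0x42..0x43.

    /*
     * Hypothetical helper, not part of the driver: interpret a filter_span=1
     * StreamID pattern according to the rules quoted above.
     */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int span_bits(uint32_t sid)
    {
            unsigned int y = 0;

            /*
             * Bit Y-1 is the lowest clear bit; bits Y-2..0 are all ones.
             * (A degenerate all-ones pattern is not handled in this sketch.)
             */
            while (y < 32 && (sid & (1u << y)))
                    y++;
            return y + 1;
    }

    int main(void)
    {
            uint32_t sid = 0x42;    /* filter_stream_id=0x42, filter_span=1 */
            unsigned int y = span_bits(sid);
            uint32_t first = sid & ~((1u << y) - 1);

            /* Prints: pattern 0x42 matches StreamIDs 0x42..0x43 */
            printf("pattern 0x%x matches StreamIDs 0x%x..0x%x\n",
                   sid, first, first + (1u << y) - 1);
            return 0;
    }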
124 event->attr._config); \
137 smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); in smmu_pmu_enable()
138 writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); in smmu_pmu_enable()
145 writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR); in smmu_pmu_disable()
146 writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); in smmu_pmu_disable()
152 if (smmu_pmu->counter_mask & BIT(32)) in smmu_pmu_counter_set_value()
153 writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); in smmu_pmu_counter_set_value()
155 writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); in smmu_pmu_counter_set_value()
162 if (smmu_pmu->counter_mask & BIT(32)) in smmu_pmu_counter_get_value()
163 value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); in smmu_pmu_counter_get_value()
165 value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); in smmu_pmu_counter_get_value()
172 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0); in smmu_pmu_counter_enable()
177 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); in smmu_pmu_counter_disable()
182 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0); in smmu_pmu_interrupt_enable()
188 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); in smmu_pmu_interrupt_disable()
194 writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); in smmu_pmu_set_evtyper()
199 writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx)); in smmu_pmu_set_smr()
204 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_update()
205 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_update()
207 u32 idx = hwc->idx; in smmu_pmu_event_update()
210 prev = local64_read(&hwc->prev_count); in smmu_pmu_event_update()
212 } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); in smmu_pmu_event_update()
215 delta = now - prev; in smmu_pmu_event_update()
216 delta &= smmu_pmu->counter_mask; in smmu_pmu_event_update()
218 local64_add(delta, &event->count); in smmu_pmu_event_update()
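The cmpxchg loop and masked subtraction above are the usual wrap-safe counter update: computing the difference modulo the implemented counter width yields the correct increment even if the hardware counter overflowed between the two reads. A small standalone illustration with made-up values, assuming a counter that is only 32 bits wide:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t counter_mask = 0xffffffffull;  /* 32-bit counter implemented */
            uint64_t prev = 0xfffffffe;             /* last snapshot, near the top */
            uint64_t now = 0x1;                     /* counter wrapped since then */
            uint64_t delta = (now - prev) & counter_mask;

            printf("delta = %llu\n", (unsigned long long)delta);   /* prints 3 */
            return 0;
    }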
224 u32 idx = hwc->idx; in smmu_pmu_set_period()
227 if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) { in smmu_pmu_set_period()
243 new = smmu_pmu->counter_mask >> 1; in smmu_pmu_set_period()
247 local64_set(&hwc->prev_count, new); in smmu_pmu_set_period()
253 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_set_event_filter()
278 unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters; in smmu_pmu_apply_event_filter()
286 cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); in smmu_pmu_apply_event_filter()
288 * Per-counter filtering, or scheduling the first globally-filtered in smmu_pmu_apply_event_filter()
291 if (!smmu_pmu->global_filter || cur_idx == num_ctrs) { in smmu_pmu_apply_event_filter()
297 if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) { in smmu_pmu_apply_event_filter()
302 return -EAGAIN; in smmu_pmu_apply_event_filter()
309 unsigned int num_ctrs = smmu_pmu->num_counters; in smmu_pmu_get_event_idx()
311 idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs); in smmu_pmu_get_event_idx()
314 return -EAGAIN; in smmu_pmu_get_event_idx()
320 set_bit(idx, smmu_pmu->used_counters); in smmu_pmu_get_event_idx()
328 if (new->pmu != curr->pmu) in smmu_pmu_events_compatible()
331 if (to_smmu_pmu(new->pmu)->global_filter && in smmu_pmu_events_compatible()
345 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_init()
346 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_init()
347 struct device *dev = smmu_pmu->dev; in smmu_pmu_event_init()
352 if (event->attr.type != event->pmu->type) in smmu_pmu_event_init()
353 return -ENOENT; in smmu_pmu_event_init()
355 if (hwc->sample_period) { in smmu_pmu_event_init()
357 return -EOPNOTSUPP; in smmu_pmu_event_init()
360 if (event->cpu < 0) { in smmu_pmu_event_init()
361 dev_dbg(dev, "Per-task mode not supported\n"); in smmu_pmu_event_init()
362 return -EOPNOTSUPP; in smmu_pmu_event_init()
368 (!test_bit(event_id, smmu_pmu->supported_events))) { in smmu_pmu_event_init()
370 return -EINVAL; in smmu_pmu_event_init()
374 if (!is_software_event(event->group_leader)) { in smmu_pmu_event_init()
375 if (!smmu_pmu_events_compatible(event->group_leader, event)) in smmu_pmu_event_init()
376 return -EINVAL; in smmu_pmu_event_init()
378 if (++group_num_events > smmu_pmu->num_counters) in smmu_pmu_event_init()
379 return -EINVAL; in smmu_pmu_event_init()
382 for_each_sibling_event(sibling, event->group_leader) { in smmu_pmu_event_init()
387 return -EINVAL; in smmu_pmu_event_init()
389 if (++group_num_events > smmu_pmu->num_counters) in smmu_pmu_event_init()
390 return -EINVAL; in smmu_pmu_event_init()
393 hwc->idx = -1; in smmu_pmu_event_init()
399 event->cpu = smmu_pmu->on_cpu; in smmu_pmu_event_init()
406 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_start()
407 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_start()
408 int idx = hwc->idx; in smmu_pmu_event_start()
410 hwc->state = 0; in smmu_pmu_event_start()
419 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_stop()
420 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_stop()
421 int idx = hwc->idx; in smmu_pmu_event_stop()
423 if (hwc->state & PERF_HES_STOPPED) in smmu_pmu_event_stop()
429 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in smmu_pmu_event_stop()
434 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_add()
436 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_add()
442 hwc->idx = idx; in smmu_pmu_event_add()
443 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in smmu_pmu_event_add()
444 smmu_pmu->events[idx] = event; in smmu_pmu_event_add()
445 local64_set(&hwc->prev_count, 0); in smmu_pmu_event_add()
460 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_del()
461 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_del()
462 int idx = hwc->idx; in smmu_pmu_event_del()
466 smmu_pmu->events[idx] = NULL; in smmu_pmu_event_del()
467 clear_bit(idx, smmu_pmu->used_counters); in smmu_pmu_event_del()
485 return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu)); in smmu_pmu_cpumask_show()
509 return sprintf(page, "event=0x%02llx\n", pmu_attr->id); in smmu_pmu_event_show()
545 if (test_bit(pmu_attr->id, smmu_pmu->supported_events)) in smmu_pmu_event_is_visible()
546 return attr->mode; in smmu_pmu_event_is_visible()
558 PMU_FORMAT_ATTR(event, "config:0-15");
559 PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
593 if (cpu != smmu_pmu->on_cpu) in smmu_pmu_offline_cpu()
600 perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target); in smmu_pmu_offline_cpu()
601 smmu_pmu->on_cpu = target; in smmu_pmu_offline_cpu()
602 WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target))); in smmu_pmu_offline_cpu()
613 ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0); in smmu_pmu_handle_irq()
617 writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); in smmu_pmu_handle_irq()
619 for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) { in smmu_pmu_handle_irq()
620 struct perf_event *event = smmu_pmu->events[idx]; in smmu_pmu_handle_irq()
627 hwc = &event->hw; in smmu_pmu_handle_irq()
648 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; in smmu_pmu_write_msi_msg()
651 writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); in smmu_pmu_write_msi_msg()
652 writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1); in smmu_pmu_write_msi_msg()
654 pmu->reg_base + SMMU_PMCG_IRQ_CFG2); in smmu_pmu_write_msi_msg()
660 struct device *dev = pmu->dev; in smmu_pmu_setup_msi()
664 writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); in smmu_pmu_setup_msi()
667 if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI)) in smmu_pmu_setup_msi()
678 pmu->irq = desc->irq; in smmu_pmu_setup_msi()
687 int irq, ret = -ENXIO; in smmu_pmu_setup_irq()
691 irq = pmu->irq; in smmu_pmu_setup_irq()
693 ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq, in smmu_pmu_setup_irq()
694 flags, "smmuv3-pmu", pmu); in smmu_pmu_setup_irq()
700 u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0); in smmu_pmu_reset()
702 smmu_pmu_disable(&smmu_pmu->pmu); in smmu_pmu_reset()
706 smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); in smmu_pmu_reset()
708 smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); in smmu_pmu_reset()
710 smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); in smmu_pmu_reset()
717 model = *(u32 *)dev_get_platdata(smmu_pmu->dev); in smmu_pmu_get_acpi_options()
722 smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY; in smmu_pmu_get_acpi_options()
726 dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options); in smmu_pmu_get_acpi_options()
737 struct device *dev = &pdev->dev; in smmu_pmu_probe()
741 return -ENOMEM; in smmu_pmu_probe()
743 smmu_pmu->dev = dev; in smmu_pmu_probe()
746 smmu_pmu->pmu = (struct pmu) { in smmu_pmu_probe()
761 smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0); in smmu_pmu_probe()
762 if (IS_ERR(smmu_pmu->reg_base)) in smmu_pmu_probe()
763 return PTR_ERR(smmu_pmu->reg_base); in smmu_pmu_probe()
765 cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR); in smmu_pmu_probe()
769 smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1); in smmu_pmu_probe()
770 if (IS_ERR(smmu_pmu->reloc_base)) in smmu_pmu_probe()
771 return PTR_ERR(smmu_pmu->reloc_base); in smmu_pmu_probe()
773 smmu_pmu->reloc_base = smmu_pmu->reg_base; in smmu_pmu_probe()
778 smmu_pmu->irq = irq; in smmu_pmu_probe()
780 ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0); in smmu_pmu_probe()
781 ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1); in smmu_pmu_probe()
782 bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64, in smmu_pmu_probe()
785 smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1; in smmu_pmu_probe()
787 smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE); in smmu_pmu_probe()
790 smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0); in smmu_pmu_probe()
796 dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start); in smmu_pmu_probe()
800 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx", in smmu_pmu_probe()
801 (res_0->start) >> SMMU_PMCG_PA_SHIFT); in smmu_pmu_probe()
803 dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start); in smmu_pmu_probe()
804 return -EINVAL; in smmu_pmu_probe()
810 smmu_pmu->on_cpu = raw_smp_processor_id(); in smmu_pmu_probe()
811 WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, in smmu_pmu_probe()
812 cpumask_of(smmu_pmu->on_cpu))); in smmu_pmu_probe()
815 &smmu_pmu->node); in smmu_pmu_probe()
818 err, &res_0->start); in smmu_pmu_probe()
822 err = perf_pmu_register(&smmu_pmu->pmu, name, -1); in smmu_pmu_probe()
825 err, &res_0->start); in smmu_pmu_probe()
830 &res_0->start, smmu_pmu->num_counters, in smmu_pmu_probe()
831 smmu_pmu->global_filter ? "Global(Counter0)" : in smmu_pmu_probe()
837 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); in smmu_pmu_probe()
839 irq_set_affinity_hint(smmu_pmu->irq, NULL); in smmu_pmu_probe()
847 perf_pmu_unregister(&smmu_pmu->pmu); in smmu_pmu_remove()
848 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); in smmu_pmu_remove()
849 irq_set_affinity_hint(smmu_pmu->irq, NULL); in smmu_pmu_remove()
858 smmu_pmu_disable(&smmu_pmu->pmu); in smmu_pmu_shutdown()
863 .name = "arm-smmu-v3-pmcg",
901 MODULE_LICENSE("GPL v2");