Lines Matching +full:odd +full:- +full:numbered
12 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
140 * - config: bits 0-7: event type
141 *           bit  32: HW counter size requested, 0: 32 bits, 1: 64 bits
146 return (event->attr.config) & L3_EVTYPE_MASK; in get_event_type()
151 return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT)); in event_uses_long_counter()
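Together these helpers split the perf config word: bits 0-7 select the event and a high bit requests the long (64 bit) counter. A standalone sketch of the decoding, assuming L3_EVTYPE_MASK is 0xff (per the config comment at line 140) and L3_EVENT_LC_BIT is 32 (an assumption, consistent with the BIT_ULL() usage above):

#include <stdint.h>
#include <stdio.h>

#define L3_EVTYPE_MASK  0xffULL         /* assumed: config bits 0-7 */
#define L3_EVENT_LC_BIT 32              /* assumed: long-counter request bit */

int main(void)
{
	/* e.g. read-miss (0x21, per the perf stat examples later in this
	 * listing) with a 64 bit counter requested */
	uint64_t config = (1ULL << L3_EVENT_LC_BIT) | 0x21;

	printf("event type:   0x%02llx\n",
	       (unsigned long long)(config & L3_EVTYPE_MASK));
	printf("long counter: %s\n",
	       (config & (1ULL << L3_EVENT_LC_BIT)) ? "yes" : "no");
	return 0;
}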
193 * When the user requests a 64 bit counter, the driver uses a pair of 32 bit
194 * counters. The PMU only supports chaining of adjacent even/odd pairs
195 * and for simplicity the driver always configures the odd counter to
196 * count the overflows of the lower-numbered even counter. Note that since
197 * the resulting hardware counter is 64 bits no IRQs are required to maintain
198 * the software counter which is also 64 bits.
203 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__64bit_counter_start()
204 int idx = event->hw.idx; in qcom_l3_cache__64bit_counter_start()
208 /* Set the odd counter to count the overflows of the even counter */ in qcom_l3_cache__64bit_counter_start()
209 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_start()
210 gang |= GANG_EN(idx + 1); in qcom_l3_cache__64bit_counter_start()
211 writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_start()
214 local64_set(&event->hw.prev_count, 0); in qcom_l3_cache__64bit_counter_start()
215 writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); in qcom_l3_cache__64bit_counter_start()
216 writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__64bit_counter_start()
222 writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1)); in qcom_l3_cache__64bit_counter_start()
223 writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); in qcom_l3_cache__64bit_counter_start()
226 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1)); in qcom_l3_cache__64bit_counter_start()
227 writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET); in qcom_l3_cache__64bit_counter_start()
228 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); in qcom_l3_cache__64bit_counter_start()
229 writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); in qcom_l3_cache__64bit_counter_start()
235 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__64bit_counter_stop()
236 int idx = event->hw.idx; in qcom_l3_cache__64bit_counter_stop()
237 u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_stop()
240 writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__64bit_counter_stop()
241 writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__64bit_counter_stop()
244 writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_stop()
249 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__64bit_counter_update()
250 int idx = event->hw.idx; in qcom_l3_cache__64bit_counter_update()
254 do { in qcom_l3_cache__64bit_counter_update()
255 prev = local64_read(&event->hw.prev_count); in qcom_l3_cache__64bit_counter_update()
256 do { in qcom_l3_cache__64bit_counter_update()
257 hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); in qcom_l3_cache__64bit_counter_update()
258 lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__64bit_counter_update()
259 } while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1))); in qcom_l3_cache__64bit_counter_update()
260 new = ((u64)hi << 32) | lo; in qcom_l3_cache__64bit_counter_update()
261 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in qcom_l3_cache__64bit_counter_update()
263 local64_add(new - prev, &event->count); in qcom_l3_cache__64bit_counter_update()
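The nested loops above are a classic consistent read of a 64 bit counter exposed as two 32 bit halves: if the high half changed while the low half was being read, a low-to-high carry may have raced the read, so it retries. The same pattern in standalone C, with volatile pointers standing in for the MMIO accessors:

#include <stdint.h>

static uint64_t read_split64(const volatile uint32_t *lo_reg,
			     const volatile uint32_t *hi_reg)
{
	uint32_t hi, lo;

	do {
		hi = *hi_reg;
		lo = *lo_reg;
	} while (hi != *hi_reg);	/* hi moved: a carry raced us, retry */

	return ((uint64_t)hi << 32) | lo;
}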
276 * When a 32 bit counter is used, the driver enables the IRQ on MSB toggle of
277 * the counter. This feature allows the counters to be left free-running
278 * without needing to be reprogrammed on interrupt; the IRQ handler simply
279 * folds the current hardware value into the 64-bit software count.
284 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__32bit_counter_start()
285 int idx = event->hw.idx; in qcom_l3_cache__32bit_counter_start()
287 u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_start()
290 writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_start()
293 local64_set(&event->hw.prev_count, 0); in qcom_l3_cache__32bit_counter_start()
294 writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__32bit_counter_start()
297 writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); in qcom_l3_cache__32bit_counter_start()
300 writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET); in qcom_l3_cache__32bit_counter_start()
303 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); in qcom_l3_cache__32bit_counter_start()
304 writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); in qcom_l3_cache__32bit_counter_start()
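PMIRQONMSBEN asks the hardware to raise the IRQ whenever the counter's MSB toggles, i.e. every 2^31 counts. That guarantees the software count is refreshed at least once per half-range, so the unsigned 32 bit delta `new - prev` is never ambiguous, even across rollover. A small simulation of that invariant (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t soft = 0;	/* 64 bit software counter */
	uint32_t prev = 0;
	/* raw 32 bit readings taken at least once per MSB toggle; the
	 * hardware wraps from 0xffffffff to 0 between the last two */
	uint32_t samples[] = { 0x7fffffff, 0xc0000000, 0x30000000 };

	for (unsigned int i = 0; i < 3; i++) {
		uint32_t new = samples[i];

		soft += (uint32_t)(new - prev);	/* u32 math absorbs the wrap */
		prev = new;
	}
	printf("soft count = 0x%llx\n", (unsigned long long)soft);
	return 0;	/* prints 0x130000000: no counts lost at rollover */
}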
310 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__32bit_counter_stop()
311 int idx = event->hw.idx; in qcom_l3_cache__32bit_counter_stop()
312 u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_stop()
315 writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__32bit_counter_stop()
318 writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR); in qcom_l3_cache__32bit_counter_stop()
321 writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_stop()
326 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__32bit_counter_update()
327 int idx = event->hw.idx; in qcom_l3_cache__32bit_counter_update()
330 do { in qcom_l3_cache__32bit_counter_update()
331 prev = local64_read(&event->hw.prev_count); in qcom_l3_cache__32bit_counter_update()
332 new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__32bit_counter_update()
333 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in qcom_l3_cache__32bit_counter_update()
335 local64_add(new - prev, &event->count); in qcom_l3_cache__32bit_counter_update()
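The cmpxchg loop publishes the latest raw reading as prev_count, so concurrent updaters (the IRQ handler and a direct read) each credit a disjoint delta exactly once. Here is the same pattern in portable C11 atomics; a sketch, not the kernel's local64_t machinery:

#include <stdatomic.h>
#include <stdint.h>

static void counter_update(_Atomic uint64_t *prev_count,
			   _Atomic uint64_t *count, uint32_t raw)
{
	uint64_t prev = atomic_load(prev_count);

	/* claim the interval [prev, raw) by installing raw as prev_count;
	 * on failure prev is reloaded with the current value and we retry */
	while (!atomic_compare_exchange_weak(prev_count, &prev, raw))
		;

	/* credit only our delta; u32 subtraction handles rollover */
	atomic_fetch_add(count, (uint32_t)(raw - (uint32_t)prev));
}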
362 writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__init()
368 writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR); in qcom_l3_cache__init()
370 writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__init()
371 writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR); in qcom_l3_cache__init()
372 writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR); in qcom_l3_cache__init()
373 writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__init()
374 writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__init()
375 writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR); in qcom_l3_cache__init()
377 for (i = 0; i < L3_NUM_COUNTERS; ++i) { in qcom_l3_cache__init()
378 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i)); in qcom_l3_cache__init()
379 writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i)); in qcom_l3_cache__init()
380 } in qcom_l3_cache__init()
382 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA); in qcom_l3_cache__init()
383 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM); in qcom_l3_cache__init()
384 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB); in qcom_l3_cache__init()
385 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM); in qcom_l3_cache__init()
386 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC); in qcom_l3_cache__init()
387 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM); in qcom_l3_cache__init()
393 writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__init()
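Note the two plain writel() calls at lines 368 and 393 amid the relaxed writes: writel() includes the memory barrier that writel_relaxed() omits, so the sequence appears deliberately bracketed, stop and reset with ordering guaranteed, program everything with relaxed writes, then re-enable with ordering guaranteed. The general shape, sketched with hypothetical register offsets rather than this device's layout:

#include <linux/io.h>

static void reprogram_unit(void __iomem *base)
{
	writel(0, base + 0x00);			/* stop unit; ordered */
	writel_relaxed(0x1, base + 0x10);	/* config writes need no   */
	writel_relaxed(0x2, base + 0x14);	/* barriers between them   */
	writel(1, base + 0x00);			/* re-enable; ordered */
}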
400 long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR); in qcom_l3_cache__handle_irq()
407 writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR); in qcom_l3_cache__handle_irq()
409 for_each_set_bit(idx, &status, L3_NUM_COUNTERS) { in qcom_l3_cache__handle_irq()
413 event = l3pmu->events[idx]; in qcom_l3_cache__handle_irq()
424 ops->update(event); in qcom_l3_cache__handle_irq()
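The handler snapshots L3_M_BC_OVSR, acknowledges it by writing the bits back (line 407), then updates every event whose overflow bit is set. The dispatch shape in standalone C; for_each_set_bit() walks set bits lowest-first, and handle_overflow() is a hypothetical stand-in for the ops->update() indirection:

#include <stdio.h>

static void handle_overflow(int idx)
{
	printf("counter %d rolled over\n", idx);
}

static void dispatch(unsigned long status)
{
	while (status) {
		int idx = __builtin_ctzl(status);	/* lowest set bit */

		handle_overflow(idx);
		status &= status - 1;			/* clear that bit */
	}
}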
442 writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__pmu_enable()
449 writel_relaxed(0, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__pmu_disable()
461 struct perf_event *leader = event->group_leader; in qcom_l3_cache__validate_event_group()
465 if (leader->pmu != event->pmu && !is_software_event(leader)) in qcom_l3_cache__validate_event_group()
466 return false; in qcom_l3_cache__validate_event_group()
474 if (sibling->pmu != event->pmu) in qcom_l3_cache__validate_event_group()
475 return false; in qcom_l3_cache__validate_event_group()
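Validation has to budget hardware counters rather than events, because a chained 64 bit event occupies an even/odd pair. The helper this implies, reconstructed from context:

/* reconstructed: hw counters consumed by one event */
static inline int event_num_counters(struct perf_event *event)
{
	return event_uses_long_counter(event) ? 2 : 1;
}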
488 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__event_init()
489 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_init()
494 if (event->attr.type != event->pmu->type) in qcom_l3_cache__event_init()
495 return -ENOENT; in qcom_l3_cache__event_init()
498 * There are no per-counter mode filters in the PMU. in qcom_l3_cache__event_init()
500 if (event->attr.exclude_user || event->attr.exclude_kernel || in qcom_l3_cache__event_init()
501 event->attr.exclude_hv || event->attr.exclude_idle) in qcom_l3_cache__event_init()
502 return -EINVAL; in qcom_l3_cache__event_init()
505 * Sampling not supported since these events are not core-attributable. in qcom_l3_cache__event_init()
507 if (hwc->sample_period) in qcom_l3_cache__event_init()
508 return -EINVAL; in qcom_l3_cache__event_init()
511 * Task mode not available, we run the counters as socket counters, in qcom_l3_cache__event_init()
512 * not attributable to any CPU and therefore cannot attribute per-task. in qcom_l3_cache__event_init()
514 if (event->cpu < 0) in qcom_l3_cache__event_init()
515 return -EINVAL; in qcom_l3_cache__event_init()
518 if (!qcom_l3_cache__validate_event_group(event)) in qcom_l3_cache__event_init()
519 return -EINVAL; in qcom_l3_cache__event_init()
521 hwc->idx = -1; in qcom_l3_cache__event_init()
527 * but can lead to issues for off-core PMUs, like this one, where in qcom_l3_cache__event_init()
528 * each event could be theoretically assigned to a different CPU. To in qcom_l3_cache__event_init()
529 * mitigate this, we enforce CPU assignment to one, selected in qcom_l3_cache__event_init()
530 * processor. in qcom_l3_cache__event_init()
534 event->cpu = cpumask_first(&l3pmu->cpumask); in qcom_l3_cache__event_init()
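From user space, the checks above translate into a system-wide, CPU-bound perf_event_open(2) call. A sketch; the dynamic PMU type must be read from /sys/bus/event_source/devices/l3cache_0_0/type, and the 0x21/bit-32 config encoding follows the layout discussed earlier (the exact bit position of the long-counter flag is an assumption):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_l3_read_miss(int pmu_type, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;			/* dynamic type from sysfs */
	attr.config = (1ULL << 32) | 0x21;	/* long counter | read-miss */

	/* pid = -1 with cpu >= 0: socket-wide counting bound to one CPU,
	 * as event_init requires (per-task mode returns -EINVAL) */
	return (int)syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}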
541 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_start()
544 hwc->state = 0; in qcom_l3_cache__event_start()
545 ops->start(event); in qcom_l3_cache__event_start()
550 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_stop()
553 if (hwc->state & PERF_HES_STOPPED) in qcom_l3_cache__event_stop()
554 return; in qcom_l3_cache__event_stop()
556 ops->stop(event, flags); in qcom_l3_cache__event_stop()
557 if (flags & PERF_EF_UPDATE) in qcom_l3_cache__event_stop()
558 ops->update(event); in qcom_l3_cache__event_stop()
559 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in qcom_l3_cache__event_stop()
564 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__event_add()
565 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_add()
572 idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order); in qcom_l3_cache__event_add()
573 if (idx < 0) in qcom_l3_cache__event_add()
575 return -EAGAIN; in qcom_l3_cache__event_add()
577 hwc->idx = idx; in qcom_l3_cache__event_add()
578 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in qcom_l3_cache__event_add()
579 l3pmu->events[idx] = event; in qcom_l3_cache__event_add()
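The `order` used by the allocation is what ties counter assignment to the chaining scheme: bitmap_find_free_region() returns a region of 1 << order bits aligned to its size. A reconstruction consistent with the surrounding code:

/* reconstructed sketch: a long-counter event takes an aligned pair */
int order = event_uses_long_counter(event) ? 1 : 0;
int idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
/* order-1 regions are two-bit aligned, so idx is always even and the
 * odd partner used by the chained start path is simply idx + 1 */

The release path at line 599 frees the same region with the same order.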
592 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__event_del()
593 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_del()
598 l3pmu->events[hwc->idx] = NULL; in qcom_l3_cache__event_del()
599 bitmap_release_region(l3pmu->used_mask, hwc->idx, order); in qcom_l3_cache__event_del()
609 ops->update(event); in qcom_l3_cache__event_read()
616 * - formats, used by perf user space and other tools to configure events
617 * - events, used by perf user space and other tools to create events
618 *   symbolically, e.g.:
619 *     perf stat -a -e l3cache_0_0/event=read-miss/ ls
620 *     perf stat -a -e l3cache_0_0/event=0x21/ ls
621 * - cpumask, used by perf user space and other tools to know on which CPUs
622 *   to open the events
633 return sprintf(buf, "%s\n", (char *) eattr->var); in l3cache_pmu_format_show()
643 L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
661 return sprintf(page, "event=0x%02llx\n", pmu_attr->id); in l3cache_pmu_event_show()
671 L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
672 L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
673 L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
674 L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
675 L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
676 L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
677 L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
693 return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask); in qcom_l3_cache_pmu_cpumask_show()
726 if (cpumask_empty(&l3pmu->cpumask)) in qcom_l3_cache_pmu_online_cpu()
727 cpumask_set_cpu(cpu, &l3pmu->cpumask); in qcom_l3_cache_pmu_online_cpu()
737 if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask)) in qcom_l3_cache_pmu_offline_cpu()
738 return 0; in qcom_l3_cache_pmu_offline_cpu()
739 target = cpumask_any_but(cpu_online_mask, cpu); in qcom_l3_cache_pmu_offline_cpu()
740 if (target >= nr_cpu_ids) in qcom_l3_cache_pmu_offline_cpu()
741 return 0; in qcom_l3_cache_pmu_offline_cpu()
742 perf_pmu_migrate_context(&l3pmu->pmu, cpu, target); in qcom_l3_cache_pmu_offline_cpu()
743 cpumask_set_cpu(target, &l3pmu->cpumask); in qcom_l3_cache_pmu_offline_cpu()
757 acpi_dev = ACPI_COMPANION(&pdev->dev); in qcom_l3_cache_pmu_probe()
758 if (!acpi_dev) in qcom_l3_cache_pmu_probe()
759 return -ENODEV; in qcom_l3_cache_pmu_probe()
761 l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL); in qcom_l3_cache_pmu_probe()
762 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s", in qcom_l3_cache_pmu_probe()
763 acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id); in qcom_l3_cache_pmu_probe()
764 if (!l3pmu || !name) in qcom_l3_cache_pmu_probe()
765 return -ENOMEM; in qcom_l3_cache_pmu_probe()
767 l3pmu->pmu = (struct pmu) { in qcom_l3_cache_pmu_probe()
782 memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); in qcom_l3_cache_pmu_probe()
783 l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc); in qcom_l3_cache_pmu_probe()
784 if (IS_ERR(l3pmu->regs)) { in qcom_l3_cache_pmu_probe()
785 dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start); in qcom_l3_cache_pmu_probe()
786 return PTR_ERR(l3pmu->regs); in qcom_l3_cache_pmu_probe()
795 ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0, in qcom_l3_cache_pmu_probe()
796 name, l3pmu); in qcom_l3_cache_pmu_probe()
797 if (ret) { in qcom_l3_cache_pmu_probe()
798 dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n", in qcom_l3_cache_pmu_probe()
799 &memrc->start); in qcom_l3_cache_pmu_probe()
804 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node); in qcom_l3_cache_pmu_probe()
805 if (ret) { in qcom_l3_cache_pmu_probe()
806 dev_err(&pdev->dev, "Error %d registering hotplug", ret); in qcom_l3_cache_pmu_probe()
807 return ret; in qcom_l3_cache_pmu_probe()
810 ret = perf_pmu_register(&l3pmu->pmu, name, -1); in qcom_l3_cache_pmu_probe()
811 if (ret < 0) { in qcom_l3_cache_pmu_probe()
812 dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret); in qcom_l3_cache_pmu_probe()
813 return ret; in qcom_l3_cache_pmu_probe()
816 dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type); in qcom_l3_cache_pmu_probe()
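The body of the (struct pmu) initializer opened at line 767 is elided from this listing; given the callbacks that appear throughout, the expected wiring is roughly as follows (field values reconstructed, the attr_groups symbol name is an assumption):

l3pmu->pmu = (struct pmu) {
	.task_ctx_nr	= perf_invalid_context,	/* system-wide only */

	.pmu_enable	= qcom_l3_cache__pmu_enable,
	.pmu_disable	= qcom_l3_cache__pmu_disable,
	.event_init	= qcom_l3_cache__event_init,
	.add		= qcom_l3_cache__event_add,
	.del		= qcom_l3_cache__event_del,
	.start		= qcom_l3_cache__event_start,
	.stop		= qcom_l3_cache__event_stop,
	.read		= qcom_l3_cache__event_read,

	.attr_groups	= qcom_l3_cache_pmu_attr_grps,	/* assumed name */
};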
829 .name = "qcom-l3cache-pmu",