
Lines Matching +full:ddr +full:- +full:pmu

1 // SPDX-License-Identifier: GPL-2.0-or-later
59 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
75 { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
76 { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
85 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
86 PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
87 PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
88 PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
89 PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
90 PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
91 PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
92 PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
93 PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
94 PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
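The PMU_EVENT_ATTR_STRING() definitions above are what perf lists in sysfs; the .unit/.scale pairs ("MB", "0.000001") let perf report the raw byte counters scaled to megabytes. A minimal sketch of how such attributes are typically gathered into a sysfs "events" group (the group itself is not among the matched lines, so its layout here is an assumption):

static struct attribute *mmdc_pmu_events_attrs[] = {
	/* assumed collection of the attributes defined above */
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

static const struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};

With a group like this registered, the events appear under /sys/bus/event_source/devices/<pmu-name>/events/ and can be counted with e.g. "perf stat -a -e mmdc0/read-bytes/" (the instance name "mmdc0" is an assumption).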
97 struct pmu pmu;
111 * Polling period is set to one second, overflow of total-cycles (the fastest
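The comment above is cut short by the match, but the arithmetic behind it is simple: the counters are 32 bits wide (see the 0xFFFFFFFF mask further down), so even the fastest event, total-cycles, wraps only after 2^32 / 528e6 ≈ 8.1 s at the i.MX6's typical 528 MHz DDR clock (the clock rate is an assumption). Polling once per second therefore never misses a wrap. A sketch of the period helper that hrtimer_start() references later (the exact value is taken from the comment, not from a matched line):

static ktime_t mmdc_pmu_timer_period(void)
{
	/* 1 s poll interval, assumed from the truncated comment above */
	return ns_to_ktime((u64)NSEC_PER_SEC);
}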
129 return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu); in mmdc_pmu_cpumask_show()
163 PMU_FORMAT_ATTR(event, "config:0-63");
164 PMU_FORMAT_ATTR(axi_id, "config1:0-63");
188 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_read_counter()
221 if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu)) in mmdc_pmu_offline_cpu()
228 perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target); in mmdc_pmu_offline_cpu()
229 cpumask_set_cpu(target, &pmu_mmdc->cpu); in mmdc_pmu_offline_cpu()
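The three lines above are the core of a CPU-hotplug callback: this is an "uncore" PMU bound to a single CPU, so when that CPU goes offline its perf context must migrate to a surviving one. A hedged reconstruction (the early-return paths are assumptions; only the test-and-clear, migrate, and set calls appear in the match):

static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu,
						     node);
	unsigned int target;

	/* Only act if this instance was bound to the departing CPU. */
	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;	/* no online CPU left to take over */

	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_mmdc->cpu);

	return 0;
}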
235 struct pmu *pmu, in mmdc_pmu_group_event_is_valid()
238 int cfg = event->attr.config; in mmdc_pmu_group_event_is_valid()
243 if (event->pmu != pmu) in mmdc_pmu_group_event_is_valid()
250 * Each event has a single fixed-purpose counter, so we can only have a
257 struct pmu *pmu = event->pmu; in mmdc_pmu_group_is_valid()
258 struct perf_event *leader = event->group_leader; in mmdc_pmu_group_is_valid()
262 set_bit(leader->attr.config, &counter_mask); in mmdc_pmu_group_is_valid()
265 if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask)) in mmdc_pmu_group_is_valid()
270 if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask)) in mmdc_pmu_group_is_valid()
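Because every hardware event owns a fixed-purpose counter, group validation reduces to a duplicate check: set one bit per event config and reject any sibling whose bit is already taken, while software events may always join the group. Reassembled from the matched lines (the is_software_event() shortcut and the test_and_set_bit() tail are assumptions consistent with the visible set_bit()/counter_mask usage):

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
					  struct pmu *pmu,
					  unsigned long *used_counters)
{
	int cfg = event->attr.config;

	/* Software events can always be grouped with hardware events. */
	if (is_software_event(event))
		return true;

	/* Siblings from another PMU cannot share this group. */
	if (event->pmu != pmu)
		return false;

	/* One fixed counter per config: reject duplicates. */
	return !test_and_set_bit(cfg, used_counters);
}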
279 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_init()
280 int cfg = event->attr.config; in mmdc_pmu_event_init()
282 if (event->attr.type != event->pmu->type) in mmdc_pmu_event_init()
283 return -ENOENT; in mmdc_pmu_event_init()
285 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in mmdc_pmu_event_init()
286 return -EOPNOTSUPP; in mmdc_pmu_event_init()
288 if (event->cpu < 0) { in mmdc_pmu_event_init()
289 dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n"); in mmdc_pmu_event_init()
290 return -EOPNOTSUPP; in mmdc_pmu_event_init()
293 if (event->attr.sample_period) in mmdc_pmu_event_init()
294 return -EINVAL; in mmdc_pmu_event_init()
297 return -EINVAL; in mmdc_pmu_event_init()
300 return -EINVAL; in mmdc_pmu_event_init()
302 event->cpu = cpumask_first(&pmu_mmdc->cpu); in mmdc_pmu_event_init()
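Taken together, the event_init() checks above mean this PMU only supports system-wide counting: no sampling, no per-task attach, and a non-negative CPU, which the driver then rewrites to the one CPU owning the PMU. A userspace sketch of opening such a counter with perf_event_open(2) (the instance name "mmdc0" and the 0x04 read-bytes config are assumptions based on the event strings above):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type;
	long fd;
	/* The dynamic PMU type must be read from sysfs. */
	FILE *f = fopen("/sys/bus/event_source/devices/mmdc0/type", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x04;	/* read-bytes, per the event strings above */
	/* attr.sample_period stays 0: the driver rejects sampling. */

	/* pid = -1, cpu = 0: system-wide counting, as event_init demands. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("read-bytes raw count: %llu\n",
		       (unsigned long long)count);
	return 0;
}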
308 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_update()
309 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_update()
313 prev_raw_count = local64_read(&hwc->prev_count); in mmdc_pmu_event_update()
315 event->attr.config); in mmdc_pmu_event_update()
316 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, in mmdc_pmu_event_update()
319 delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF; in mmdc_pmu_event_update()
321 local64_add(delta, &event->count); in mmdc_pmu_event_update()
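The fragments above come from the standard lock-free counter-update loop: re-read the hardware counter until prev_count can be swapped atomically, then accumulate the wrapped 32-bit delta. Reassembled for readability (the declarations are inferred from the visible lines):

static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				 new_raw_count) != prev_raw_count);

	/* The counters are 32 bits wide; the mask handles wrap-around. */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}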
326 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_start()
327 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_start()
331 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_event_start()
338 hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(), in mmdc_pmu_event_start()
341 local64_set(&hwc->prev_count, 0); in mmdc_pmu_event_start()
348 val = event->attr.config1; in mmdc_pmu_event_start()
354 if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL) in mmdc_pmu_event_start()
362 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_add()
363 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_add()
365 int cfg = event->attr.config; in mmdc_pmu_event_add()
370 if (pmu_mmdc->mmdc_events[cfg] != NULL) in mmdc_pmu_event_add()
371 return -EAGAIN; in mmdc_pmu_event_add()
373 pmu_mmdc->mmdc_events[cfg] = event; in mmdc_pmu_event_add()
374 pmu_mmdc->active_events++; in mmdc_pmu_event_add()
376 local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg)); in mmdc_pmu_event_add()
383 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_stop()
386 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_event_stop()
399 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_del()
400 int cfg = event->attr.config; in mmdc_pmu_event_del()
402 pmu_mmdc->mmdc_events[cfg] = NULL; in mmdc_pmu_event_del()
403 pmu_mmdc->active_events--; in mmdc_pmu_event_del()
405 if (pmu_mmdc->active_events == 0) in mmdc_pmu_event_del()
406 hrtimer_cancel(&pmu_mmdc->hrtimer); in mmdc_pmu_event_del()
416 struct perf_event *event = pmu_mmdc->mmdc_events[i]; in mmdc_pmu_overflow_handler()
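This line sits inside the hrtimer-driven poll: the MMDC profiling block raises no overflow interrupt, so a timer walks every active event slot and folds the counters into perf before they can wrap. A sketch of that flow (MMDC_NUM_COUNTERS and the exact handler bodies are assumptions; mmdc_pmu_timer_handler and the per-slot lookup come from the matched lines):

static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
	int i;

	for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
		struct perf_event *event = pmu_mmdc->mmdc_events[i];

		if (event)
			mmdc_pmu_event_update(event);
	}
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
						 hrtimer);

	mmdc_pmu_overflow_handler(pmu_mmdc);
	hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

	return HRTIMER_RESTART;
}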
438 .pmu = (struct pmu) { in mmdc_pmu_init()
454 pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL); in mmdc_pmu_init()
456 return pmu_mmdc->id; in mmdc_pmu_init()
463 ida_simple_remove(&mmdc_ida, pmu_mmdc->id); in imx_mmdc_remove()
464 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_remove()
465 perf_pmu_unregister(&pmu_mmdc->pmu); in imx_mmdc_remove()
466 iounmap(pmu_mmdc->mmdc_base); in imx_mmdc_remove()
467 clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk); in imx_mmdc_remove()
479 of_match_device(imx_mmdc_dt_ids, &pdev->dev); in imx_mmdc_perf_init()
483 pr_err("failed to allocate PMU device!\n"); in imx_mmdc_perf_init()
484 return -ENOMEM; in imx_mmdc_perf_init()
499 ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); in imx_mmdc_perf_init()
503 name = devm_kasprintf(&pdev->dev, in imx_mmdc_perf_init()
506 pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; in imx_mmdc_perf_init()
507 pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data; in imx_mmdc_perf_init()
509 hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC, in imx_mmdc_perf_init()
511 pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler; in imx_mmdc_perf_init()
513 cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu); in imx_mmdc_perf_init()
515 /* Register the pmu instance for cpu hotplug */ in imx_mmdc_perf_init()
516 cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_perf_init()
518 ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1); in imx_mmdc_perf_init()
526 pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret); in imx_mmdc_perf_init()
527 ida_simple_remove(&mmdc_ida, pmu_mmdc->id); in imx_mmdc_perf_init()
528 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_perf_init()
529 hrtimer_cancel(&pmu_mmdc->hrtimer); in imx_mmdc_perf_init()
542 struct device_node *np = pdev->dev.of_node; in imx_mmdc_probe()
549 mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL); in imx_mmdc_probe()
555 dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n"); in imx_mmdc_probe()
563 /* Get ddr type */ in imx_mmdc_probe()
591 .name = "imx-mmdc",
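The ".name" line belongs to the platform driver that ties the probe/remove functions above to the "fsl,imx6q-mmdc"/"fsl,imx6qp-mmdc" device-tree nodes. A hedged sketch of the registration (the .remove wiring and the module_platform_driver() form are assumptions; the driver may instead register via an initcall):

static struct platform_driver imx_mmdc_driver = {
	.driver = {
		.name = "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe = imx_mmdc_probe,
	.remove = imx_mmdc_remove,
};
module_platform_driver(imx_mmdc_driver);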