Lines Matching +full:ddr +full:- +full:pmu
10 * http://www.opensource.org/licenses/gpl-license.html
64 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
80 { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
81 { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
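The two compatibles select per-SoC capability data; only the i.MX6QP variant needs the profile-select handling tested later in mmdc_pmu_event_start(). A sketch of the referenced data, with the flag value assumed:

	/* Sketch: flag name taken from the check in mmdc_pmu_event_start()
	 * further down; exact layout and value are assumptions. */
	#define MMDC_FLAG_PROFILE_SEL	0x1

	struct fsl_mmdc_devtype_data {
		unsigned int flags;
	};

	static const struct fsl_mmdc_devtype_data imx6q_data = { /* no flags */ };

	static const struct fsl_mmdc_devtype_data imx6qp_data = {
		.flags = MMDC_FLAG_PROFILE_SEL,
	};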
90 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
91 PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
92 PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
93 PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
94 PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
95 PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
96 PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
97 PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
98 PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
99 PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
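PMU_EVENT_ATTR_STRING comes from <linux/perf_event.h> and turns each line above into a read-only sysfs attribute whose file content is the quoted string; this is how perf(1) discovers the event encodings and the MB unit/scale pairs for the byte counters. Roughly, one line expands to something like:

	/* Approximate expansion of one line above; field layout is a sketch
	 * based on the kernel's PMU_EVENT_ATTR_STRING macro. */
	static struct perf_pmu_events_attr mmdc_pmu_read_bytes = {
		.attr      = __ATTR(read-bytes, 0444, perf_event_sysfs_show, NULL),
		.event_str = "event=0x04",
	};

so reading events/read-bytes from the PMU's sysfs directory yields "event=0x04".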
102 struct pmu pmu;
114 * Polling period is set to one second, overflow of total-cycles (the fastest
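(For scale: with 32-bit counters and the i.MX6 MMDC typically clocked around 528 MHz, total-cycles wraps after roughly 2^32 / 528e6 ≈ 8 s, so a one-second poll is comfortably inside that window.)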
132 return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu); in mmdc_pmu_cpumask_show()
166 PMU_FORMAT_ATTR(event, "config:0-63");
167 PMU_FORMAT_ATTR(axi_id, "config1:0-63");
191 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_read_counter()
224 if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu)) in mmdc_pmu_offline_cpu()
231 perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target); in mmdc_pmu_offline_cpu()
232 cpumask_set_cpu(target, &pmu_mmdc->cpu); in mmdc_pmu_offline_cpu()
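These three matches form the usual uncore-PMU hotplug callback: if the CPU that owns the counters goes offline, ownership and the perf context move to any surviving CPU. Reconstructed as a sketch (surrounding lines assumed from context, not part of the matches):

	static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
	{
		struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
		int target;

		if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
			return 0;	/* this CPU did not own the PMU */

		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			return 0;	/* no online CPU left to take over */

		perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
		cpumask_set_cpu(target, &pmu_mmdc->cpu);

		return 0;
	}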
238 struct pmu *pmu, in mmdc_pmu_group_event_is_valid()
241 int cfg = event->attr.config; in mmdc_pmu_group_event_is_valid()
246 if (event->pmu != pmu) in mmdc_pmu_group_event_is_valid()
253 * Each event has a single fixed-purpose counter, so we can only have a
260 struct pmu *pmu = event->pmu; in mmdc_pmu_group_is_valid()
261 struct perf_event *leader = event->group_leader; in mmdc_pmu_group_is_valid()
265 set_bit(leader->attr.config, &counter_mask); in mmdc_pmu_group_is_valid()
268 if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask)) in mmdc_pmu_group_is_valid()
273 if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask)) in mmdc_pmu_group_is_valid()
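Condensed sketch of the per-event check the matches above feed: each event's config doubles as its fixed counter index, so a group is schedulable only if no two hardware events claim the same mask bit (helper body assumed from context):

	static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
						  struct pmu *pmu,
						  unsigned long *used_counters)
	{
		if (is_software_event(event))
			return true;		/* software siblings never conflict */

		if (event->pmu != pmu)
			return false;		/* no mixing with other PMUs */

		return !test_and_set_bit(event->attr.config, used_counters);
	}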
282 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_init()
283 int cfg = event->attr.config; in mmdc_pmu_event_init()
285 if (event->attr.type != event->pmu->type) in mmdc_pmu_event_init()
286 return -ENOENT; in mmdc_pmu_event_init()
288 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in mmdc_pmu_event_init()
289 return -EOPNOTSUPP; in mmdc_pmu_event_init()
291 if (event->cpu < 0) { in mmdc_pmu_event_init()
292 dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n"); in mmdc_pmu_event_init()
293 return -EOPNOTSUPP; in mmdc_pmu_event_init()
296 if (event->attr.exclude_user || in mmdc_pmu_event_init()
297 event->attr.exclude_kernel || in mmdc_pmu_event_init()
298 event->attr.exclude_hv || in mmdc_pmu_event_init()
299 event->attr.exclude_idle || in mmdc_pmu_event_init()
300 event->attr.exclude_host || in mmdc_pmu_event_init()
301 event->attr.exclude_guest || in mmdc_pmu_event_init()
302 event->attr.sample_period) in mmdc_pmu_event_init()
303 return -EINVAL; in mmdc_pmu_event_init()
306 return -EINVAL; in mmdc_pmu_event_init()
309 return -EINVAL; in mmdc_pmu_event_init()
311 event->cpu = cpumask_first(&pmu_mmdc->cpu); in mmdc_pmu_event_init()
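Taken together with the config:0-63 / config1:0-63 format attributes above, these checks define the only event shape the driver accepts: counting mode, CPU-bound (never per-task), no sampling or exclusion bits. A hypothetical userspace sketch that satisfies them:

	/* Hypothetical sketch: open the read-accesses counter (event=0x02 per
	 * the attribute strings above), optionally filtered by an AXI ID via
	 * config1.  pid = -1 keeps it system-wide, matching the driver's
	 * rejection of per-task events; the driver then retargets the event
	 * to the CPU that owns the PMU (line 311). */
	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_mmdc_read_accesses(int pmu_type,
					   unsigned long long axi_id, int cpu)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = pmu_type;	/* read from .../devices/mmdc0/type */
		attr.config = 0x02;	/* event field: config:0-63 */
		attr.config1 = axi_id;	/* axi_id field: config1:0-63 */

		return syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
			       -1 /* group_fd */, 0 /* flags */);
	}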
317 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_update()
318 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_update()
322 prev_raw_count = local64_read(&hwc->prev_count); in mmdc_pmu_event_update()
324 event->attr.config); in mmdc_pmu_event_update()
325 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, in mmdc_pmu_event_update()
328 delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF; in mmdc_pmu_event_update()
330 local64_add(delta, &event->count); in mmdc_pmu_event_update()
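The mask makes the unsigned subtraction wrap correctly for the 32-bit hardware counters. A standalone demo of the arithmetic:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t prev = 0xFFFFFFF0u, cur = 0x00000010u;
		uint64_t delta = (cur - prev) & 0xFFFFFFFF;

		/* prints delta = 0x20: 32 counts across the wrap, not garbage */
		printf("delta = 0x%" PRIx64 "\n", delta);
		return 0;
	}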
335 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_start()
336 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_start()
340 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_event_start()
347 hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(), in mmdc_pmu_event_start()
350 local64_set(&hwc->prev_count, 0); in mmdc_pmu_event_start()
357 val = event->attr.config1; in mmdc_pmu_event_start()
363 if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL) in mmdc_pmu_event_start()
371 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_add()
372 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_add()
374 int cfg = event->attr.config; in mmdc_pmu_event_add()
379 if (pmu_mmdc->mmdc_events[cfg] != NULL) in mmdc_pmu_event_add()
380 return -EAGAIN; in mmdc_pmu_event_add()
382 pmu_mmdc->mmdc_events[cfg] = event; in mmdc_pmu_event_add()
383 pmu_mmdc->active_events++; in mmdc_pmu_event_add()
385 local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg)); in mmdc_pmu_event_add()
392 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_stop()
395 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_event_stop()
408 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_del()
409 int cfg = event->attr.config; in mmdc_pmu_event_del()
411 pmu_mmdc->mmdc_events[cfg] = NULL; in mmdc_pmu_event_del()
412 pmu_mmdc->active_events--; in mmdc_pmu_event_del()
414 if (pmu_mmdc->active_events == 0) in mmdc_pmu_event_del()
415 hrtimer_cancel(&pmu_mmdc->hrtimer); in mmdc_pmu_event_del()
425 struct perf_event *event = pmu_mmdc->mmdc_events[i]; in mmdc_pmu_overflow_handler()
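Only this one line of the polling path matched; in sketch form, the handler walks every fixed counter and folds its current value into the perf event before the hrtimer re-arms (names assumed from context):

	static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
	{
		int i;

		for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
			struct perf_event *event = pmu_mmdc->mmdc_events[i];

			if (event)
				mmdc_pmu_event_update(event);
		}
	}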
449 .pmu = (struct pmu) { in mmdc_pmu_init()
473 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_remove()
474 perf_pmu_unregister(&pmu_mmdc->pmu); in imx_mmdc_remove()
486 of_match_device(imx_mmdc_dt_ids, &pdev->dev); in imx_mmdc_perf_init()
490 pr_err("failed to allocate PMU device!\n"); in imx_mmdc_perf_init()
491 return -ENOMEM; in imx_mmdc_perf_init()
506 mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); in imx_mmdc_perf_init()
510 name = devm_kasprintf(&pdev->dev, in imx_mmdc_perf_init()
513 pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data; in imx_mmdc_perf_init()
515 hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC, in imx_mmdc_perf_init()
517 pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler; in imx_mmdc_perf_init()
519 cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu); in imx_mmdc_perf_init()
521 /* Register the pmu instance for cpu hotplug */ in imx_mmdc_perf_init()
522 cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_perf_init()
524 ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1); in imx_mmdc_perf_init()
532 pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret); in imx_mmdc_perf_init()
533 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_perf_init()
534 hrtimer_cancel(&pmu_mmdc->hrtimer); in imx_mmdc_perf_init()
547 struct device_node *np = pdev->dev.of_node; in imx_mmdc_probe()
555 /* Get ddr type */ in imx_mmdc_probe()
577 .name = "imx-mmdc",
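With the driver bound, each controller should appear under /sys/bus/event_source/devices/ (presumably as mmdc0 for the first instance, per the devm_kasprintf() naming above), at which point the events are usable system-wide, e.g. perf stat -a -e mmdc0/read-bytes/ sleep 1 (a hypothetical invocation; the exact instance name depends on probe order).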