1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * HiSilicon PA uncore Hardware event counters support
4 *
5 * Copyright (C) 2020 HiSilicon Limited
6 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
7 *
8 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
9 */
10 #include <linux/acpi.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/list.h>
15 #include <linux/smp.h>
16
17 #include "hisi_uncore_pmu.h"
18
19 /* PA register definition */
20 #define PA_PERF_CTRL 0x1c00
21 #define PA_EVENT_CTRL 0x1c04
22 #define PA_TT_CTRL 0x1c08
23 #define PA_TGTID_CTRL 0x1c14
24 #define PA_SRCID_CTRL 0x1c18
25 #define PA_INT_MASK 0x1c70
26 #define PA_INT_STATUS 0x1c78
27 #define PA_INT_CLEAR 0x1c7c
28 #define PA_EVENT_TYPE0 0x1c80
29 #define PA_PMU_VERSION 0x1cf0
30 #define PA_EVENT_CNT0_L 0x1f00
31
32 #define PA_EVTYPE_MASK 0xff
33 #define PA_NR_COUNTERS 0x8
34 #define PA_PERF_CTRL_EN BIT(0)
35 #define PA_TRACETAG_EN BIT(4)
36 #define PA_TGTID_EN BIT(11)
37 #define PA_SRCID_EN BIT(11)
38 #define PA_TGTID_NONE 0
39 #define PA_SRCID_NONE 0
40 #define PA_TGTID_MSK_SHIFT 12
41 #define PA_SRCID_MSK_SHIFT 12
42
43 HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0);
44 HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11);
45 HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
46 HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
47 HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
48
hisi_pa_pmu_enable_tracetag(struct perf_event * event)49 static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
50 {
51 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
52 u32 tt_en = hisi_get_tracetag_en(event);
53
54 if (tt_en) {
55 u32 val;
56
57 val = readl(pa_pmu->base + PA_TT_CTRL);
58 val |= PA_TRACETAG_EN;
59 writel(val, pa_pmu->base + PA_TT_CTRL);
60 }
61 }
62
hisi_pa_pmu_clear_tracetag(struct perf_event * event)63 static void hisi_pa_pmu_clear_tracetag(struct perf_event *event)
64 {
65 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
66 u32 tt_en = hisi_get_tracetag_en(event);
67
68 if (tt_en) {
69 u32 val;
70
71 val = readl(pa_pmu->base + PA_TT_CTRL);
72 val &= ~PA_TRACETAG_EN;
73 writel(val, pa_pmu->base + PA_TT_CTRL);
74 }
75 }
76
/*
 * Program the target-ID filter from config1: write the command value,
 * its mask (shifted into the mask field) and the enable bit to
 * PA_TGTID_CTRL.  A zero command means no tgtid filtering is requested.
 */
static void hisi_pa_pmu_config_tgtid(struct perf_event *event)
{
	struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
	u32 cmd = hisi_get_tgtid_cmd(event);

	if (cmd) {
		u32 msk = hisi_get_tgtid_msk(event);
		u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT);

		writel(val, pa_pmu->base + PA_TGTID_CTRL);
	}
}
89
/* Disable target-ID filtering by writing the "none" value back. */
static void hisi_pa_pmu_clear_tgtid(struct perf_event *event)
{
	struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
	u32 cmd = hisi_get_tgtid_cmd(event);

	if (cmd)
		writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL);
}
98
/*
 * Program the source-ID filter from config1, mirroring the tgtid path:
 * command, shifted mask and enable bit go into PA_SRCID_CTRL.
 */
static void hisi_pa_pmu_config_srcid(struct perf_event *event)
{
	struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
	u32 cmd = hisi_get_srcid_cmd(event);

	if (cmd) {
		u32 msk = hisi_get_srcid_msk(event);
		u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT);

		writel(val, pa_pmu->base + PA_SRCID_CTRL);
	}
}
111
/* Disable source-ID filtering by writing the "none" value back. */
static void hisi_pa_pmu_clear_srcid(struct perf_event *event)
{
	struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
	u32 cmd = hisi_get_srcid_cmd(event);

	if (cmd)
		writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL);
}
120
/*
 * Apply all hardware filters encoded in config1 (tracetag, srcid,
 * tgtid).  Skipped entirely when config1 is zero, i.e. no filtering.
 */
static void hisi_pa_pmu_enable_filter(struct perf_event *event)
{
	if (event->attr.config1 != 0x0) {
		hisi_pa_pmu_enable_tracetag(event);
		hisi_pa_pmu_config_srcid(event);
		hisi_pa_pmu_config_tgtid(event);
	}
}
129
/* Tear down the config1 filters, in reverse order of enable_filter. */
static void hisi_pa_pmu_disable_filter(struct perf_event *event)
{
	if (event->attr.config1 != 0x0) {
		hisi_pa_pmu_clear_tgtid(event);
		hisi_pa_pmu_clear_srcid(event);
		hisi_pa_pmu_clear_tracetag(event);
	}
}
138
hisi_pa_pmu_get_counter_offset(int idx)139 static u32 hisi_pa_pmu_get_counter_offset(int idx)
140 {
141 return (PA_EVENT_CNT0_L + idx * 8);
142 }
143
/* Read the 64-bit hardware counter assigned to this event. */
static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu,
				    struct hw_perf_event *hwc)
{
	return readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
}
149
/* Write the 64-bit hardware counter assigned to this event. */
static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	writeq(val, pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
}
155
/*
 * Program the 8-bit event code for counter @idx into its event select
 * register, preserving the event fields of the other three counters
 * that share the register.
 */
static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx,
				     u32 type)
{
	u32 reg, reg_idx, shift, val;

	/*
	 * Select the appropriate event select register(PA_EVENT_TYPE0/1).
	 * There are 2 event select registers for the 8 hardware counters.
	 * Event code is 8-bits and for the former 4 hardware counters,
	 * PA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters,
	 * PA_EVENT_TYPE1 is chosen.
	 */
	reg = PA_EVENT_TYPE0 + (idx / 4) * 4;
	reg_idx = idx % 4;
	shift = 8 * reg_idx;

	/* Write event code to PA_EVENT_TYPEx Register */
	val = readl(pa_pmu->base + reg);
	val &= ~(PA_EVTYPE_MASK << shift);
	/* Mask the code so a stray wide value can't clobber neighbours. */
	val |= (type & PA_EVTYPE_MASK) << shift;
	writel(val, pa_pmu->base + reg);
}
178
hisi_pa_pmu_start_counters(struct hisi_pmu * pa_pmu)179 static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu)
180 {
181 u32 val;
182
183 val = readl(pa_pmu->base + PA_PERF_CTRL);
184 val |= PA_PERF_CTRL_EN;
185 writel(val, pa_pmu->base + PA_PERF_CTRL);
186 }
187
hisi_pa_pmu_stop_counters(struct hisi_pmu * pa_pmu)188 static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu)
189 {
190 u32 val;
191
192 val = readl(pa_pmu->base + PA_PERF_CTRL);
193 val &= ~(PA_PERF_CTRL_EN);
194 writel(val, pa_pmu->base + PA_PERF_CTRL);
195 }
196
/* Enable one counter: set its bit in the PA_EVENT_CTRL bitmap. */
static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu,
				       struct hw_perf_event *hwc)
{
	u32 val;

	/* Enable counter index in PA_EVENT_CTRL register */
	val = readl(pa_pmu->base + PA_EVENT_CTRL);
	val |= 1 << hwc->idx;
	writel(val, pa_pmu->base + PA_EVENT_CTRL);
}
207
/* Disable one counter: clear its bit in the PA_EVENT_CTRL bitmap. */
static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index in PA_EVENT_CTRL register */
	val = readl(pa_pmu->base + PA_EVENT_CTRL);
	val &= ~(1 << hwc->idx);
	writel(val, pa_pmu->base + PA_EVENT_CTRL);
}
218
/* Unmask the overflow interrupt for this counter (0 = enabled). */
static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
					   struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(pa_pmu->base + PA_INT_MASK);
	val &= ~(1 << hwc->idx);
	writel(val, pa_pmu->base + PA_INT_MASK);
}
229
/* Mask the overflow interrupt for this counter (1 = masked). */
static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(pa_pmu->base + PA_INT_MASK);
	val |= 1 << hwc->idx;
	writel(val, pa_pmu->base + PA_INT_MASK);
}
240
/* Return the pending-overflow bitmap, one bit per counter. */
static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
{
	return readl(pa_pmu->base + PA_INT_STATUS);
}
245
hisi_pa_pmu_clear_int_status(struct hisi_pmu * pa_pmu,int idx)246 static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
247 {
248 writel(1 << idx, pa_pmu->base + PA_INT_CLEAR);
249 }
250
/* ACPI HID of the PA PMU device this driver binds to. */
static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
	{ "HISI0273", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
256
hisi_pa_pmu_init_data(struct platform_device * pdev,struct hisi_pmu * pa_pmu)257 static int hisi_pa_pmu_init_data(struct platform_device *pdev,
258 struct hisi_pmu *pa_pmu)
259 {
260 /*
261 * Use the SCCL_ID and the index ID to identify the PA PMU,
262 * while SCCL_ID is the nearst SCCL_ID from this SICL and
263 * CPU core is chosen from this SCCL to manage this PMU.
264 */
265 if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
266 &pa_pmu->sccl_id)) {
267 dev_err(&pdev->dev, "Cannot read sccl-id!\n");
268 return -EINVAL;
269 }
270
271 if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
272 &pa_pmu->index_id)) {
273 dev_err(&pdev->dev, "Cannot read idx-id!\n");
274 return -EINVAL;
275 }
276
277 pa_pmu->ccl_id = -1;
278
279 pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
280 if (IS_ERR(pa_pmu->base)) {
281 dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
282 return PTR_ERR(pa_pmu->base);
283 }
284
285 pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION);
286
287 return 0;
288 }
289
/* sysfs "format" entries: bit layout of config/config1 for userspace. */
static struct attribute *hisi_pa_pmu_v2_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"),
	HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"),
	HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
	HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
	HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
	NULL,
};

static const struct attribute_group hisi_pa_pmu_v2_format_group = {
	.name = "format",
	.attrs = hisi_pa_pmu_v2_format_attr,
};
304
/* sysfs "events" entries: named events and their hardware event codes. */
static struct attribute *hisi_pa_pmu_v2_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_req, 0x40),
	HISI_PMU_EVENT_ATTR(tx_req, 0x5c),
	HISI_PMU_EVENT_ATTR(cycle, 0x78),
	NULL
};

static const struct attribute_group hisi_pa_pmu_v2_events_group = {
	.name = "events",
	.attrs = hisi_pa_pmu_v2_events_attr,
};
316
/* sysfs "cpumask": the CPU currently managing this uncore PMU. */
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
	.attrs = hisi_pa_pmu_cpumask_attrs,
};
327
/* sysfs "identifier": hardware version read from PA_PMU_VERSION. */
static struct device_attribute hisi_pa_pmu_identifier_attr =
	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);

static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
	&hisi_pa_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group hisi_pa_pmu_identifier_group = {
	.attrs = hisi_pa_pmu_identifier_attrs,
};
339
/* All sysfs groups exposed under the registered PMU device. */
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
	&hisi_pa_pmu_v2_format_group,
	&hisi_pa_pmu_v2_events_group,
	&hisi_pa_pmu_cpumask_attr_group,
	&hisi_pa_pmu_identifier_group,
	NULL
};
347
/* Hardware callbacks plugged into the common hisi_uncore_pmu framework. */
static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
	.write_evtype		= hisi_pa_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_pa_pmu_start_counters,
	.stop_counters		= hisi_pa_pmu_stop_counters,
	.enable_counter		= hisi_pa_pmu_enable_counter,
	.disable_counter	= hisi_pa_pmu_disable_counter,
	.enable_counter_int	= hisi_pa_pmu_enable_counter_int,
	.disable_counter_int	= hisi_pa_pmu_disable_counter_int,
	.write_counter		= hisi_pa_pmu_write_counter,
	.read_counter		= hisi_pa_pmu_read_counter,
	.get_int_status		= hisi_pa_pmu_get_int_status,
	.clear_int_status	= hisi_pa_pmu_clear_int_status,
	.enable_filter		= hisi_pa_pmu_enable_filter,
	.disable_filter		= hisi_pa_pmu_disable_filter,
};
364
/*
 * Common device initialisation: read firmware data, request the IRQ
 * and fill in the hisi_pmu description (ops, counters, attributes).
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
				 struct hisi_pmu *pa_pmu)
{
	int ret;

	ret = hisi_pa_pmu_init_data(pdev, pa_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev);
	if (ret)
		return ret;

	pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups;
	pa_pmu->num_counters = PA_NR_COUNTERS;
	pa_pmu->ops = &hisi_uncore_pa_ops;
	/* Highest valid event code + 1 for this PMU version. */
	pa_pmu->check_event = 0xB0;
	pa_pmu->counter_bits = 64;
	pa_pmu->dev = &pdev->dev;
	/* No managing CPU yet; chosen by the cpuhp online callback. */
	pa_pmu->on_cpu = -1;

	return 0;
}
388
/*
 * Bind a PA PMU platform device: allocate and initialise the hisi_pmu,
 * join the CPU-hotplug state machine, then register with the perf core.
 * Hotplug registration must precede perf registration so a managing CPU
 * is selected; it is rolled back if perf_pmu_register() fails.
 */
static int hisi_pa_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *pa_pmu;
	char *name;
	int ret;

	pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL);
	if (!pa_pmu)
		return -ENOMEM;

	ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
	if (ret)
		return ret;
	/*
	 * PA is attached in SICL and the CPU core is chosen to manage this
	 * PMU which is the nearest SCCL, while its SCCL_ID is greater than
	 * one with the SICL_ID.
	 */
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
			      pa_pmu->sccl_id - 1, pa_pmu->index_id);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
				       &pa_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	pa_pmu->pmu = (struct pmu) {
		.module		= THIS_MODULE,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= hisi_uncore_pmu_event_init,
		.pmu_enable	= hisi_uncore_pmu_enable,
		.pmu_disable	= hisi_uncore_pmu_disable,
		.add		= hisi_uncore_pmu_add,
		.del		= hisi_uncore_pmu_del,
		.start		= hisi_uncore_pmu_start,
		.stop		= hisi_uncore_pmu_stop,
		.read		= hisi_uncore_pmu_read,
		.attr_groups	= pa_pmu->pmu_events.attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
	if (ret) {
		dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
		/* Undo the hotplug registration made above. */
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
						    &pa_pmu->node);
		return ret;
	}

	platform_set_drvdata(pdev, pa_pmu);
	return ret;
}
445
/* Unbind: unregister from perf first, then leave the hotplug machine. */
static int hisi_pa_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&pa_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
					    &pa_pmu->node);
	return 0;
}
455
456 static struct platform_driver hisi_pa_pmu_driver = {
457 .driver = {
458 .name = "hisi_pa_pmu",
459 .acpi_match_table = hisi_pa_pmu_acpi_match,
460 .suppress_bind_attrs = true,
461 },
462 .probe = hisi_pa_pmu_probe,
463 .remove = hisi_pa_pmu_remove,
464 };
465
/*
 * Module entry: set up the shared multi-instance hotplug state before
 * registering the platform driver, and tear it down if registration
 * fails.
 */
static int __init hisi_pa_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
				      "AP_PERF_ARM_HISI_PA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_pa_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);

	return ret;
}
module_init(hisi_pa_pmu_module_init);
486
/* Module exit: unregister the driver, then drop the hotplug state. */
static void __exit hisi_pa_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_pa_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
}
module_exit(hisi_pa_pmu_module_exit);
493
494 MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
495 MODULE_LICENSE("GPL v2");
496 MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
497 MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
498