// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC HHA uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* HHA register definition */
#define HHA_INT_MASK		0x0804
#define HHA_INT_STATUS		0x0808
#define HHA_INT_CLEAR		0x080C
#define HHA_PERF_CTRL		0x1E00
#define HHA_EVENT_CTRL		0x1E04
#define HHA_EVENT_TYPE0		0x1E80
/*
 * Each counter is 48 bits wide; bits [48:63] are reserved and are
 * Read-As-Zero and Writes-Ignored.
 */
#define HHA_CNT0_LOWER		0x1F00

/* HHA has 16 counters */
#define HHA_NR_COUNTERS		0x10

#define HHA_PERF_CTRL_EN	0x1
#define HHA_EVTYPE_NONE		0xff
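/*
 * Note: HHA_EVTYPE_NONE (0xff) also serves below as the 8-bit
 * per-counter field mask when programming the HHA_EVENT_TYPEx
 * registers.
 */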

/*
 * Select the counter register offset from the counter index;
 * each counter is 48 bits.
 */
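/*
 * For example (illustrative): counter index 3 maps to
 * HHA_CNT0_LOWER + 3 * 8 = 0x1F18.
 */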
static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
{
	return (HHA_CNT0_LOWER + (cntr_idx * 8));
}

static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
				     struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;

	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
		return 0;
	}

	/* Read 64 bits; as in L3C, the upper 16 bits are RAZ */
	return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
}

static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
				       struct hw_perf_event *hwc, u64 val)
{
	u32 idx = hwc->idx;

	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
		return;
	}

	/* Write 64 bits; as in L3C, the upper 16 bits are WI */
	writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
}

static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
				      u32 type)
{
	u32 reg, reg_idx, shift, val;

	/*
	 * Select the appropriate event select register (HHA_EVENT_TYPEx).
	 * There are 4 event select registers for the 16 hardware counters.
	 * The event code is 8 bits: the first 4 hardware counters use
	 * HHA_EVENT_TYPE0, the next 4 use HHA_EVENT_TYPE1, and so on.
	 */
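	/*
	 * For example (illustrative): counter index 6 selects
	 * HHA_EVENT_TYPE1 (0x1E84), byte lane 2, i.e. bits [23:16].
	 */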
	reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
	reg_idx = idx % 4;
	shift = 8 * reg_idx;

	/* Write event code to HHA_EVENT_TYPEx register */
	val = readl(hha_pmu->base + reg);
	val &= ~(HHA_EVTYPE_NONE << shift);
	val |= (type << shift);
	writel(val, hha_pmu->base + reg);
}

static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
{
	u32 val;

	/*
	 * Set perf_enable bit in HHA_PERF_CTRL to start event
	 * counting for all enabled counters.
	 */
	val = readl(hha_pmu->base + HHA_PERF_CTRL);
	val |= HHA_PERF_CTRL_EN;
	writel(val, hha_pmu->base + HHA_PERF_CTRL);
}

static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
{
	u32 val;

	/*
	 * Clear perf_enable bit in HHA_PERF_CTRL to stop event
	 * counting for all enabled counters.
	 */
	val = readl(hha_pmu->base + HHA_PERF_CTRL);
	val &= ~(HHA_PERF_CTRL_EN);
	writel(val, hha_pmu->base + HHA_PERF_CTRL);
}

static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Enable counter index in HHA_EVENT_CTRL register */
	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
	val |= (1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}

static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
					 struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index in HHA_EVENT_CTRL register */
	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
	val &= ~(1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}

static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(hha_pmu->base + HHA_INT_MASK);
	val &= ~(1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_INT_MASK);
}

static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(hha_pmu->base + HHA_INT_MASK);
	val |= (1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_INT_MASK);
}

static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
{
	struct hisi_pmu *hha_pmu = dev_id;
	struct perf_event *event;
	unsigned long overflown;
	int idx;

	/* Read HHA_INT_STATUS register */
	overflown = readl(hha_pmu->base + HHA_INT_STATUS);
	if (!overflown)
		return IRQ_NONE;

	/*
	 * Find each counter index whose overflow bit is set
	 * and handle it.
	 */
	for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
		/* Write 1 to clear the IRQ status flag */
		writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);

		/* Get the corresponding event struct */
		event = hha_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}

static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
				 struct platform_device *pdev)
{
	int irq, ret;

	/* Read and init IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

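	/*
	 * IRQF_NOBALANCING keeps irqbalance from migrating the interrupt
	 * and IRQF_NO_THREAD avoids forced threading, so the overflow
	 * handler runs in hard-IRQ context on the CPU it is affined to.
	 */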
	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), hha_pmu);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Failed to request IRQ:%d ret:%d\n", irq, ret);
		return ret;
	}

	hha_pmu->irq = irq;

	return 0;
}

static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
	{ "HISI0243", },
	{},
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);

static int hisi_hha_pmu_init_data(struct platform_device *pdev,
				  struct hisi_pmu *hha_pmu)
{
	unsigned long long id;
	struct resource *res;
	acpi_status status;

	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
				       "_UID", NULL, &id);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	hha_pmu->index_id = id;

	/*
	 * Use SCCL_ID and UID to identify the HHA PMU, while
	 * SCCL_ID is in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &hha_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Cannot read hha sccl-id!\n");
		return -EINVAL;
	}
	/* HHA PMUs belong to the whole SCCL, so there is no CCL association */
	hha_pmu->ccl_id = -1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hha_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
		return PTR_ERR(hha_pmu->base);
	}

	return 0;
}

static struct attribute *hisi_hha_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_format_group = {
	.name = "format",
	.attrs = hisi_hha_pmu_format_attr,
};

static struct attribute *hisi_hha_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
	HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
	HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
	HISI_PMU_EVENT_ATTR(rx_ccix, 0x03),
	HISI_PMU_EVENT_ATTR(rx_wbi, 0x04),
	HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
	HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
	HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
	HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
	HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
	HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
	HISI_PMU_EVENT_ATTR(spill_num, 0x20),
	HISI_PMU_EVENT_ATTR(spill_success, 0x21),
	HISI_PMU_EVENT_ATTR(bi_num, 0x23),
	HISI_PMU_EVENT_ATTR(mediated_num, 0x32),
	HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33),
	HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34),
	HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35),
	HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38),
	HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c),
	HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40),
	HISI_PMU_EVENT_ATTR(edir-lookup, 0x41),
	HISI_PMU_EVENT_ATTR(sdir-hit, 0x42),
	HISI_PMU_EVENT_ATTR(edir-hit, 0x43),
	HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c),
	HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_events_group = {
	.name = "events",
	.attrs = hisi_hha_pmu_events_attr,
};
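/*
 * Example usage (illustrative; the instance name depends on the SCCL
 * and HHA indices of a given system):
 *
 *   perf stat -a -e hisi_sccl3_hha0/rx_ops_num/ sleep 1
 */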

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
	.attrs = hisi_hha_pmu_cpumask_attrs,
};

static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
	&hisi_hha_pmu_format_group,
	&hisi_hha_pmu_events_group,
	&hisi_hha_pmu_cpumask_attr_group,
	NULL,
};

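/*
 * These callbacks are invoked by the shared hisi_uncore_pmu framework
 * on behalf of the generic perf hooks wired up in probe() below.
 */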
static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
	.write_evtype = hisi_hha_pmu_write_evtype,
	.get_event_idx = hisi_uncore_pmu_get_event_idx,
	.start_counters = hisi_hha_pmu_start_counters,
	.stop_counters = hisi_hha_pmu_stop_counters,
	.enable_counter = hisi_hha_pmu_enable_counter,
	.disable_counter = hisi_hha_pmu_disable_counter,
	.enable_counter_int = hisi_hha_pmu_enable_counter_int,
	.disable_counter_int = hisi_hha_pmu_disable_counter_int,
	.write_counter = hisi_hha_pmu_write_counter,
	.read_counter = hisi_hha_pmu_read_counter,
};

static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
				  struct hisi_pmu *hha_pmu)
{
	int ret;

	ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
	if (ret)
		return ret;

	ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
	if (ret)
		return ret;

	hha_pmu->num_counters = HHA_NR_COUNTERS;
	hha_pmu->counter_bits = 48;
	hha_pmu->ops = &hisi_uncore_hha_ops;
	hha_pmu->dev = &pdev->dev;
	hha_pmu->on_cpu = -1;
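	/*
	 * check_event is the upper bound on the event code accepted by
	 * the shared event_init path; codes above 0x65 are rejected.
	 */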
	hha_pmu->check_event = 0x65;

	return 0;
}

static int hisi_hha_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *hha_pmu;
	char *name;
	int ret;

	hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
	if (!hha_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, hha_pmu);

	ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				       &hha_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

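	/*
	 * The PMU is named hisi_scclN_hhaM and, once registered, shows up
	 * under /sys/bus/event_source/devices/.
	 */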
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
			      hha_pmu->sccl_id, hha_pmu->index_id);
	if (!name) {
		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
					    &hha_pmu->node);
		return -ENOMEM;
	}

	hha_pmu->pmu = (struct pmu) {
		.name = name,
		.module = THIS_MODULE,
		.task_ctx_nr = perf_invalid_context,
		.event_init = hisi_uncore_pmu_event_init,
		.pmu_enable = hisi_uncore_pmu_enable,
		.pmu_disable = hisi_uncore_pmu_disable,
		.add = hisi_uncore_pmu_add,
		.del = hisi_uncore_pmu_del,
		.start = hisi_uncore_pmu_start,
		.stop = hisi_uncore_pmu_stop,
		.read = hisi_uncore_pmu_read,
		.attr_groups = hisi_hha_pmu_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
	if (ret) {
		dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
					    &hha_pmu->node);
	}

	return ret;
}

static int hisi_hha_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&hha_pmu->pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				    &hha_pmu->node);

	return 0;
}

static struct platform_driver hisi_hha_pmu_driver = {
	.driver = {
		.name = "hisi_hha_pmu",
		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_hha_pmu_probe,
	.remove = hisi_hha_pmu_remove,
};

static int __init hisi_hha_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				      "AP_PERF_ARM_HISI_HHA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("HHA PMU: Error setting up hotplug, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_hha_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);

	return ret;
}
module_init(hisi_hha_pmu_module_init);

static void __exit hisi_hha_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_hha_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
}
module_exit(hisi_hha_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");