/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "perf_event.h"
#include "perf_event_amd_iommu.h"

#define COUNTER_SHIFT		16

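/*
 * The bank/counter pair assigned to an event is cached in
 * hw.extra_reg.reg: bank in bits 15:8, counter in bits 7:0
 * (see get_next_avail_iommu_bnk_cntr()).
 */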
#define _GET_BANK(ev)       ((u8)(ev->hw.extra_reg.reg >> 8))
#define _GET_CNTR(ev)       ((u8)(ev->hw.extra_reg.reg))

/* iommu pmu config masks */
#define _GET_CSOURCE(ev)    ((ev->hw.config & 0xFFULL))
#define _GET_DEVID(ev)      ((ev->hw.config >> 8)  & 0xFFFFULL)
#define _GET_PASID(ev)      ((ev->hw.config >> 24) & 0xFFFFULL)
#define _GET_DOMID(ev)      ((ev->hw.config >> 40) & 0xFFFFULL)
#define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config)  & 0xFFFFULL)
#define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL)
#define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL)

static struct perf_amd_iommu __perf_iommu;

struct perf_amd_iommu {
	struct pmu pmu;
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
	const struct attribute_group *attr_groups[4];
};

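/*
 * Convenience names for the fixed attr_groups[] slots; null_group is
 * left NULL and terminates the array handed to perf_pmu_register().
 */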
#define format_group	attr_groups[0]
#define cpumask_group	attr_groups[1]
#define events_group	attr_groups[2]
#define null_group	attr_groups[3]

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(pasid,      "config:24-39");
PMU_FORMAT_ATTR(domid,      "config:40-55");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(pasid_mask, "config1:16-31");
PMU_FORMAT_ATTR(domid_mask, "config1:32-47");

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
struct amd_iommu_event_desc {
	struct kobj_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

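/*
 * Each event name maps to a counter-source (csource) number as defined
 * for the IOMMU performance counter banks in the AMD IOMMU
 * architecture specification.
 */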
static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,        "csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,       "csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,           "csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,        "csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,         "csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,   "csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,   "csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,   "csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,   "csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,             "csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,             "csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,       "csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,       "csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,       "csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,             "csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,             "csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,           "csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,       "csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,                 "csource=0x13"),
	{ /* end: all zeroes */ },
};

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
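/*
 * The IOMMU counters are system-wide, so every event is bound to a
 * single CPU (CPU 0); this mask advertises that CPU to userspace.
 */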
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &iommu_cpumask);
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu)
{
	unsigned long flags;
	int shift, bank, cntr, retval;
	int max_banks = perf_iommu->max_banks;
	int max_cntrs = perf_iommu->max_counters;

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);

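	/*
	 * cntr_assign_mask tracks counters with a stride of four per
	 * bank (bit position = bank * 4 + cntr).  A successful claim is
	 * returned as (bank << 8) | cntr, the encoding expected by the
	 * _GET_BANK()/_GET_CNTR() accessors.
	 */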
	for (bank = 0, shift = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			shift = (bank * 4) + cntr;
			if (perf_iommu->cntr_assign_mask & (1ULL << shift))
				continue;
			perf_iommu->cntr_assign_mask |= (1ULL << shift);
			retval = ((u16)bank << 8) | (u8)cntr;
			goto out;
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
					u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	/* bank and cntr are zero-based indices */
	if ((bank >= max_banks) || (cntr >= max_cntrs))
		return -EINVAL;

	shift = (bank * 4) + cntr;

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~(1ULL << shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu;
	u64 config, config1;

	/* test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores.
	 * Therefore, they support neither per-process mode
	 * nor event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* IOMMU counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	perf_iommu = &__perf_iommu;

	if (event->pmu != &perf_iommu->pmu)
		return -ENOENT;

	/* perf_iommu points at the static instance and cannot be NULL */
	config = event->attr.config;
	config1 = event->attr.config1;

	/* integrate with iommu base devid (0000), assume one iommu */
	perf_iommu->max_banks =
		amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID);
	perf_iommu->max_counters =
		amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID);
	if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0))
		return -EINVAL;

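	/*
	 * config carries the csource/devid/pasid/domid selection fields
	 * and config1 the corresponding match masks, in the bit layout
	 * published by the sysfs format attributes above.
	 */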
	/* update the hw_perf_event struct with the iommu config data */
	hwc->config = config;
	hwc->extra_reg.config = config1;

	return 0;
}

static void perf_iommu_enable_event(struct perf_event *ev)
{
	u8 csource = _GET_CSOURCE(ev);
	u16 devid = _GET_DEVID(ev);
	u64 reg = 0ULL;

	reg = csource;
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_COUNTER_SRC_REG, &reg, true);

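	/*
	 * Program the three match registers: the match value sits in the
	 * low bits, its mask in bits 47:32.  Bit 31 is set whenever a
	 * value or mask was supplied, which enables that match qualifier.
	 */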
	reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32);
	if (reg)
		reg |= (1UL << 31);
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_DEVID_MATCH_REG, &reg, true);

	reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
	if (reg)
		reg |= (1UL << 31);
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_PASID_MATCH_REG, &reg, true);

	reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
	if (reg)
		reg |= (1UL << 31);
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_DOMID_MATCH_REG, &reg, true);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	u64 reg = 0ULL;

	/* a counter source of zero stops the counter */
	amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
			_GET_BANK(event), _GET_CNTR(event),
			IOMMU_PC_COUNTER_SRC_REG, &reg, true);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	pr_debug("perf: amd_iommu:perf_iommu_start\n");
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

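	/*
	 * On PERF_EF_RELOAD, write the saved count back into the
	 * hardware counter so the next perf_iommu_read() computes its
	 * delta against a known starting value.
	 */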
	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
				_GET_BANK(event), _GET_CNTR(event),
				IOMMU_PC_COUNTER_REG, &prev_raw_count, true);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count = 0ULL;
	u64 prev_raw_count = 0ULL;
	u64 delta = 0ULL;
	struct hw_perf_event *hwc = &event->hw;

	pr_debug("perf: amd_iommu:perf_iommu_read\n");

	amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
				_GET_BANK(event), _GET_CNTR(event),
				IOMMU_PC_COUNTER_REG, &count, false);

	/* IOMMU pc counter register is only 48 bits */
	count &= 0xFFFFFFFFFFFFULL;

	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					count) != prev_raw_count)
		return;

	/*
	 * Handle 48-bit counter overflow: shift both values up by
	 * COUNTER_SHIFT (16) so the 48-bit quantities occupy the top of
	 * the 64-bit word, let the subtraction wrap, then shift back
	 * down.  This yields a correct positive delta even when the
	 * hardware counter has rolled over (the same trick as
	 * x86_perf_event_update()).
	 */
	delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	pr_debug("perf: amd_iommu:perf_iommu_stop\n");

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	/* fold the final count into event->count */
	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	pr_debug("perf: amd_iommu:perf_iommu_add\n");
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(perf_iommu);
	if (retval < 0)
		return retval;
	event->hw.extra_reg.reg = (u16)retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	pr_debug("perf: amd_iommu:perf_iommu_del\n");
	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				     _GET_BANK(event),
				     _GET_CNTR(event));

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
{
	struct attribute **attrs;
	struct attribute_group *attr_group;
	int i = 0, j;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

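	/*
	 * Allocate the group and its NULL-terminated attribute array in
	 * one block; attrs[] lives directly behind the struct, so the
	 * single kfree() in amd_iommu_pc_exit() releases both.
	 */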
	attr_group = kzalloc(sizeof(struct attribute *)
		* (i + 1) + sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group)
		return -ENOMEM;

	attrs = (struct attribute **)(attr_group + 1);
	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	attr_group->name = "events";
	attr_group->attrs = attrs;
	perf_iommu->events_group = attr_group;

	return 0;
}

static __init void amd_iommu_pc_exit(void)
{
	/* kfree(NULL) is a no-op, so no need to test first */
	kfree(__perf_iommu.events_group);
	__perf_iommu.events_group = NULL;
}

static __init int _init_perf_amd_iommu(
	struct perf_amd_iommu *perf_iommu, char *name)
{
	int ret;

	raw_spin_lock_init(&perf_iommu->lock);

	/* Init format attributes */
	perf_iommu->format_group = &amd_iommu_format_group;

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	perf_iommu->cpumask_group = &amd_iommu_cpumask_group;

	/* Init events attributes */
	if (_init_events_attrs(perf_iommu) != 0)
		pr_err("perf: amd_iommu: Only raw events are supported.\n");

	/* Init null attributes */
	perf_iommu->null_group = NULL;
	perf_iommu->pmu.attr_groups = perf_iommu->attr_groups;

	ret = perf_pmu_register(&perf_iommu->pmu, name, -1);
	if (ret) {
		pr_err("perf: amd_iommu: Failed to initialize.\n");
		amd_iommu_pc_exit();
	} else {
		pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n",
			amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID),
			amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID));
	}

	return ret;
}

static struct perf_amd_iommu __perf_iommu = {
	.pmu = {
		.event_init	= perf_iommu_event_init,
		.add		= perf_iommu_add,
		.del		= perf_iommu_del,
		.start		= perf_iommu_start,
		.stop		= perf_iommu_stop,
		.read		= perf_iommu_read,
	},
	.max_banks		= 0x00,
	.max_counters		= 0x00,
	.cntr_assign_mask	= 0ULL,
	.format_group		= NULL,
	.cpumask_group		= NULL,
	.events_group		= NULL,
	.null_group		= NULL,
};

static __init int amd_iommu_pc_init(void)
{
	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	return _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
}

device_initcall(amd_iommu_pc_init);