/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

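/*
 * Check whether the platform provides the dedicated discovery table
 * PCI device, by probing for its DVSEC extended capability.
 */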
static bool has_generic_discovery_table(void)
{
	struct pci_dev *dev;
	int dvsec;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
	if (!dev)
		return false;

	/* A discovery table device has a unique capability ID. */
	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
	pci_dev_put(dev);
	if (dvsec)
		return true;

	return false;
}

static int logical_die_id;

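/*
 * Look up the logical die ID of a discovery table device via the NUMA
 * node of its PCI bus.
 */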
static int get_device_die_id(struct pci_dev *dev)
{
	int cpu, node = pcibus_to_node(dev->bus);

	/*
	 * If NUMA info is not available, assume that the logical die IDs
	 * are assigned contiguously, in the order in which the discovery
	 * table devices are detected.
	 */
	if (node < 0)
		return logical_die_id++;

	for_each_cpu(cpu, cpumask_of_node(node)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->logical_die_id;
	}

	/*
	 * All CPUs of a node may be offline. In that case, the PCI- and
	 * MMIO-type uncore blocks enumerated by this device will be
	 * unavailable.
	 */
	return -1;
}

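/*
 * Discovery types are kept in the discovery_tables rb-tree, sorted by
 * their box type.
 */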
#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

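/*
 * Allocate a new discovery type for @unit and insert it into the
 * rb-tree. Returns NULL on an unsupported access type or on allocation
 * failure.
 */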
static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!type->box_ctrl_die)
		goto free_type;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;

free_type:
	kfree(type);

	return NULL;
}

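/* Find an existing type in the rb-tree, or allocate and insert a new one. */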
static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

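/*
 * Record the information of a discovered box. While the first table is
 * being parsed (!parsed), the per-type generic information and the
 * per-box ID and control-offset arrays are built up; on later tables
 * (parsed), only the first box control address of each die is stored.
 */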
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die, bool parsed)
{
	struct intel_uncore_discovery_type *type;
	unsigned int *box_offset, *ids;
	int i;

	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
		pr_info("Invalid address detected for uncore type %d box %d. "
			"Disabling the uncore unit.\n",
			unit->box_type, unit->box_id);
		return;
	}

	if (parsed) {
		type = search_uncore_discovery_type(unit->box_type);
		if (!type) {
			pr_info("Spurious uncore type %d detected. "
				"Disabling the uncore type.\n",
				unit->box_type);
			return;
		}
		/* Store the first box of each die */
		if (!type->box_ctrl_die[die])
			type->box_ctrl_die[die] = unit->ctl;
		return;
	}

	type = get_uncore_discovery_type(unit);
	if (!type)
		return;

	box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!box_offset)
		return;

	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!ids)
		goto free_box_offset;

	/* Store the generic information for the first box */
	if (!type->num_boxes) {
		type->box_ctrl = unit->ctl;
		type->box_ctrl_die[die] = unit->ctl;
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
		*ids = unit->box_id;
		goto end;
	}

	for (i = 0; i < type->num_boxes; i++) {
		ids[i] = type->ids[i];
		box_offset[i] = type->box_offset[i];

		if (unit->box_id == ids[i]) {
			pr_info("Duplicate uncore type %d box ID %d detected. "
				"Dropping the duplicate uncore unit.\n",
				unit->box_type, unit->box_id);
			goto free_ids;
		}
	}
	ids[i] = unit->box_id;
	box_offset[i] = unit->ctl - type->box_ctrl;
	kfree(type->ids);
	kfree(type->box_offset);
end:
	type->ids = ids;
	type->box_offset = box_offset;
	type->num_boxes++;
	return;

free_ids:
	kfree(ids);

free_box_offset:
	kfree(box_offset);
}

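/*
 * Parse the discovery table pointed to by a BAR of the discovery table
 * device: map it, validate the Global Discovery State, then walk the
 * Unit Discovery State of every unit and record the valid ones.
 */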
static int parse_discovery_table(struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	resource_size_t addr;
	unsigned long size;
	u32 val;
	int i;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & UNCORE_DISCOVERY_MASK)
		return -EINVAL;

	addr = (resource_size_t)(val & ~UNCORE_DISCOVERY_MASK);
	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read the Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Parse the Unit Discovery State of each unit */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		uncore_insert_box_info(&unit, die, *parsed);
	}

	*parsed = true;
	iounmap(io_addr);
	return 0;
}

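/*
 * Scan all discovery table devices and parse their discovery tables.
 * Returns true if at least one table was successfully parsed.
 */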
bool intel_uncore_has_discovery_tables(void)
{
	u32 device, val, entry_id, bar_offset;
	int die, dvsec = 0, ret = true;
	struct pci_dev *dev = NULL;
	bool parsed = false;

	if (has_generic_discovery_table())
		device = UNCORE_DISCOVERY_TABLE_DEVICE;
	else
		device = PCI_ANY_ID;

	/*
	 * Start a new search and iterate through the list of the
	 * discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}
			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(dev, die, bar_offset, &parsed);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;
err:
	pci_dev_put(dev);

	return ret;
}

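/* Free the rb-tree of discovery types and their per-die control arrays. */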
void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		kfree(type->box_ctrl_die);
		kfree(type);
	}
}

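/* Event format of the generic uncore PMON blocks, exposed via sysfs. */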
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh.attr,
	NULL,
};

static const struct attribute_group generic_uncore_format_group = {
	.name = "format",
	.attrs = generic_uncore_formats_attr,
};

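/*
 * Generic MSR ops: writing GENERIC_PMON_BOX_CTL_INT to the box control
 * MSR (re)initializes the box, GENERIC_PMON_BOX_CTL_FRZ freezes its
 * counters, and writing 0 unfreezes them.
 */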
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
						  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
						   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= intel_generic_uncore_msr_disable_event,
	.enable_event		= intel_generic_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

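/*
 * Generic PCI ops: the same control protocol as the MSR flavor, but the
 * registers live in the device's PCI config space.
 */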
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	/* The event config registers of the box are 8 bytes apart */
	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
						  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, 0);
}

u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	/* A 64-bit counter is read as two 32-bit config space accesses */
	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
	.init_box	= intel_generic_uncore_pci_init_box,
	.disable_box	= intel_generic_uncore_pci_disable_box,
	.enable_box	= intel_generic_uncore_pci_enable_box,
	.disable_event	= intel_generic_uncore_pci_disable_event,
	.enable_event	= intel_generic_uncore_pci_enable_event,
	.read_counter	= intel_generic_uncore_pci_read_counter,
};

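/*
 * Generic MMIO ops: each box's registers are memory mapped; a box maps
 * UNCORE_GENERIC_MMIO_SIZE bytes starting at its per-die control
 * address plus its per-box MMIO offset.
 */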
#define UNCORE_GENERIC_MMIO_SIZE		0x4000

static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;

	if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
		return 0;

	return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	if (!box_ctl) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx]);
		return;
	}

	addr = box_ctl;
	box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx],
			(unsigned long long)addr);
		return;
	}

	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr);
}

static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
						   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
	.init_box	= intel_generic_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= intel_generic_uncore_mmio_disable_box,
	.enable_box	= intel_generic_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

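/*
 * Fill in a generic intel_uncore_type from the discovery information,
 * wiring up the ops and register offsets that match its access type.
 */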
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_type *uncore,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_boxes = type->num_boxes;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->box_ids = type->ids;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->msr_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
		uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
		uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
		uncore->box_ctls = type->box_ctrl_die;
		uncore->pci_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->perf_ctr = (unsigned int)type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->box_ctls = type->box_ctrl_die;
		uncore->mmio_offsets = type->box_offset;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
		return false;
	}

	return true;
}

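/*
 * Build the NULL-terminated array of generic uncore types for the given
 * access type. @num_extra reserves additional slots for the caller to
 * append platform-specific types.
 */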
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
	struct intel_uncore_discovery_type *type;
	struct intel_uncore_type **uncores;
	struct intel_uncore_type *uncore;
	struct rb_node *node;
	int i = 0;

	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
	if (!uncores)
		return empty_uncore;

	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
		type = rb_entry(node, struct intel_uncore_discovery_type, node);
		if (type->access_type != type_id)
			continue;

		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
		if (!uncore)
			break;

		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
		uncore->format_group = &generic_uncore_format_group;

		if (!uncore_update_uncore_type(type_id, uncore, type)) {
			kfree(uncore);
			continue;
		}
		uncores[i++] = uncore;
	}

	return uncores;
}

void intel_uncore_generic_uncore_cpu_init(void)
{
	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

	return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}