/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
#define UNCORE_PMC_IDX_MAX_FIXED 1
#define UNCORE_PMC_IDX_MAX_FREERUNNING 1
#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \
				    UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \
			    UNCORE_PMC_IDX_MAX_FREERUNNING)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data) ((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data) ((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
#define UNCORE_EXTRA_PCI_DEV_MAX 4
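
/*
 * Illustrative sketch (not part of the original header): the macros above pack
 * a PCI device number, function number, device type and index into a single
 * int, and unpack it again.  The helper below exists only to demonstrate the
 * round trip; its name and the numeric values are made up.
 */
static inline void uncore_pci_dev_data_example(void)
{
	int data = UNCORE_PCI_DEV_FULL_DATA(0x1e, 3, 2, 0);

	/* Each accessor recovers the field packed by UNCORE_PCI_DEV_FULL_DATA. */
	WARN_ON(UNCORE_PCI_DEV_DEV(data) != 0x1e);
	WARN_ON(UNCORE_PCI_DEV_FUNC(data) != 3);
	WARN_ON(UNCORE_PCI_DEV_TYPE(data) != 2);
	WARN_ON(UNCORE_PCI_DEV_IDX(data) != 0);
}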

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		unsigned *msr_offsets;
		unsigned *pci_offsets;
		unsigned *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	/*
	 * The uncore PMU stores the relevant platform topology configuration
	 * here so it can identify which platform component each PMON block of
	 * this type is supposed to monitor.
	 */
	struct intel_uncore_topology *topology;
	/*
	 * Optional callbacks for managing the mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	int (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
};
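
/*
 * Minimal sketch (illustrative only): how a driver might describe one uncore
 * unit type with the structure above.  Every name, count and register offset
 * below is hypothetical; real definitions live in uncore_snb.c/uncore_snbep.c.
 * Kept under #if 0 because it references an ops structure that does not exist
 * in this header.
 */
#if 0
static struct intel_uncore_type example_uncore_cbox = {
	.name		= "cbox",		/* hypothetical unit name */
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.perf_ctr	= 0x700,		/* hypothetical counter MSR base */
	.event_ctl	= 0x701,		/* hypothetical control MSR base */
	.event_mask	= 0xffff,
	.msr_offset	= 0x10,			/* hypothetical stride between boxes */
	.ops		= &example_uncore_msr_ops,	/* hypothetical ops table */
};
#endif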

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						    struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70
#define CFL_UNC_CBO_7_PER_CTR0 0xf76

#define UNCORE_BOX_FLAG_INITIATED 0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8 1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct intel_uncore_topology {
	u64 configuration;
	int segment;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies() (__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr = __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config = _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
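
/*
 * Illustrative usage sketch (not part of the original header): how an uncore
 * driver typically instantiates the two macros above.  The attribute names,
 * bit-field ranges and event encodings shown here are examples only, not a
 * specific platform's layout; kept under #if 0 so nothing is compiled.
 */
#if 0
/* Exposes .../format/event and .../format/umask as "config:0-7"/"config:8-15". */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");

/* Entries for an events/ sysfs directory, read back via uncore_event_show(). */
static struct uncore_event_desc example_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x01"),
	{ /* end: all zeroes */ },
};
#endif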

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}


/*
 * In the uncore document, there is no event-code assigned to free running
 * counters.  Some events need to be defined to indicate the free running
 * counters.  The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter from a free running
 * counter, and to distinguish different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counter, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS.  IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters.  BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START 0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
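
/*
 * Worked example (illustrative only, hypothetical helper name): decoding the
 * 'bw_in_port2' encoding from the comment above.  event=0xff,umask=0x22 gives
 * a raw config of 0x22ff, which the helpers split into the free running type
 * and the index within that type.
 */
static inline void uncore_freerunning_decode_example(void)
{
	u64 config = 0x22ff;	/* event=0xff, umask=0x22 */

	/* BANDWIDTH is the second umask type (0x2Y), i.e. freerunning[] index 1. */
	WARN_ON(uncore_freerunning_type(config) != 1);
	/* 'bw_in_port2' is the third counter of that type, i.e. index 2. */
	WARN_ON(uncore_freerunning_idx(config) != 2);
}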

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
		pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
		pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);