// SPDX-License-Identifier: GPL-2.0
/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC	0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC	0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC	0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC	0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC	0x591f
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC	0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC	0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC	0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC	0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
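/*
 * General-purpose counters take the full event encoding plus the enable
 * bit; the fixed counter control register only needs the enable bit.
 */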
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

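/*
 * Only the first PMU instance (pmu_idx 0) programs the package-wide
 * uncore global control MSR.
 */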
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

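/*
 * Skylake moved the uncore global control MSR to 0xe01 and widened the
 * per-core enable mask to five bits; the rest of the Sandy Bridge
 * scheme is reused.
 */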
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

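/*
 * The ARB box type is shared with Sandy Bridge, but it has to be driven
 * through the SKL ops so the relocated global control MSR is used.
 */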
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

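/*
 * The IMC events count 64-byte cache lines; the scale of
 * 64 / 2^20 = 6.103515625e-5 converts the raw count to MiB.
 */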
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

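/*
 * The IMC counters live in MMIO space: read the BAR from PCI config
 * space (64-bit when supported), mask off the low bits and map the
 * register block.
 */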
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

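/*
 * The counters are plain 32-bit MMIO registers; a single read returns
 * the current free-running value.
 */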
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

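/*
 * The free-running counters cannot be started or stopped in hardware;
 * "starting" an event only snapshots the current value and arms the box
 * hrtimer, which periodically reads the 32-bit MMIO counters back so
 * that wraps are not missed.
 */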
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
}

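/*
 * Client parts are single-package: map the bus that hosts the IMC
 * device straight to physical package 0.
 */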
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	{  /* end marker */ }
};


#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

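/*
 * Probe the list of known client IMC PCI IDs; the first device actually
 * present on the system selects the matching stub PCI driver.
 */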
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

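/*
 * The fixed clockticks counter has its own control MSR with a single
 * enable bit; general counters take the full event encoding.
 */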
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

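/*
 * The single Nehalem uncore box is registered with an empty name, so it
 * shows up as the plain "uncore" PMU.
 */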
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */