/* SandyBridge-EP/IvyTown uncore support */
#include "perf_event_intel_uncore.h"


/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_EV_SEL_EXT | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
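
/*
 * Each SNBEP_CBO_EVENT_EXTRA_REG() entry declares that a given
 * event/umask encoding (matched via .event against .config_mask) needs
 * the Cbox filter register, and .idx records which filter fields it may
 * claim (a bitmap consumed by snbep_cbox_hw_config() below).  For
 * example, the table entry
 *
 *	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4)
 *
 * marks event 0x34 with umask 0x03 as a user of the STATE filter field
 * (bit 2 of the fields bitmap, see snbep_cbox_filter_mask()).
 */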

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				 ((1ULL << (n)) - 1)))
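
/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit-wide field of x.  The
 * constraint code below packs several small reference counts into one
 * atomic_t this way: with n = 6, field i occupies bits [6i+5:6i], so
 * for example __BITS_VALUE(0xc01, 0, 6) == 0x01 and
 * __BITS_VALUE(0xc01, 1, 6) == 0x30.
 */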

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715


DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

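/*
 * The format attributes above are exported under each PMU's sysfs
 * "format" directory and tell the perf tool where to place each
 * user-supplied field within attr.config/config1/config2.  For
 * instance (the event numbers here are purely illustrative), a Cbox
 * event with an opcode filter could be requested as
 *
 *	perf stat -e 'uncore_cbox_0/event=0x34,umask=0x3,filter_opc=0x182/'
 *
 * which packs event/umask into config and filter_opc into config1
 * bits 23-31, per the definitions above.
 */
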
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
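
/*
 * Box control protocol (a summary, inferred from the helpers above):
 * init_box resets the control and counter registers and arms the
 * freeze machinery (SNBEP_PMON_BOX_CTL_INT = RST_CTRL | RST_CTRS |
 * FRZ_EN); disable_box then stops every counter in the box at once by
 * setting the FRZ bit, and enable_box resumes them by clearing it.
 * The same sequence is used for the MSR- and PCI-based boxes; only the
 * register access method differs.
 */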

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

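/*
 * The Cbox filter register is shared by all counters in a box, but
 * different events may claim different fields of it (TID/NID/STATE/OPC
 * on SNB-EP, up to five fields on IvyTown).  To arbitrate, the
 * per-field use counts are packed into a single atomic_t, 6 bits per
 * field, and the get/put constraint callbacks below atomically bump
 * the slice belonging to each claimed field.  A field may be shared as
 * long as every user programs the same value into it.
 */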
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}
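
/*
 * For example, fields == 0x3 (TID | NID) yields
 * SNBEP_CB0_MSR_PMON_BOX_FILTER_TID | SNBEP_CB0_MSR_PMON_BOX_FILTER_NID,
 * i.e. 0x1f | 0x3fc00 == 0x3fc1f: exactly the filter-register bits the
 * caller is allowed to program for those two fields.
 */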

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
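
/*
 * The PCU filter register holds four 8-bit frequency/occupancy bands,
 * selected by the band events (see snbep_pcu_hw_config() below).  If
 * the band an event asked for is already taken, snbep_pcu_get_constraint()
 * uses snbep_pcu_alter_er() to retarget the event to a free band:
 * e.g. moving from band 0 to band 2 shifts the filter value left by
 * 16 bits and, when modify is true, also bumps the event select in
 * hwc->config by 2 so the counter tracks the matching band event.
 */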

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
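
/*
 * Event selects 0xb-0xe are the four band events, so band N's 8-bit
 * threshold lives in config1 bits [8N+7:8N] (the filter_band0..3
 * formats defined above).  All other PCU events take no filter setup,
 * which is why hw_config only acts on that event range.
 */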

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

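/*
 * A Cbox PMON block exists per LLC slice, which on these parts pairs
 * with a core, so the table's worst-case num_boxes is clamped here to
 * the detected core count.
 */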
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
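
/*
 * Event 0x38 is the packet-matching event: config1 supplies the 64-bit
 * MATCH value and config2 the 64-bit MASK.  The MATCH/MASK registers
 * live in a separate "filter" PCI function (see the PORTx_FILTER ids
 * in the device table below), which is why snbep_qpi_enable_event()
 * looks up uncore_extra_pci_dev[] and writes each 64-bit value as two
 * 32-bit config-space dwords.
 */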

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
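		/*
		 * For example, if the mapping register read 0x688
		 * (binary 011 010 001 000), groups 0..3 would decode to
		 * nodes 0, 1, 2 and 3; a local nodeid of 2 would then
		 * match at i == 2, assigning this bus to socket 2.
		 */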
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			for (bus = 255; bus >= 0; bus--) {
				if (map->pbus_to_physid[bus] >= 0)
					i = map->pbus_to_physid[bus];
				else
					map->pbus_to_physid[bus] = i;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};

static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}
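
/*
 * Note that bit 4 of the fields bitmap claims OPC together with
 * NC/C6/ISOC: on IvyTown these selectors all live in the upper filter
 * bits and are handed out as one unit here, so an opcode-filtered
 * event implicitly owns the NC/C6/ISOC selectors as well.
 */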

static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
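
/*
 * The IvyTown Cbox filter value is 64 bits wide and spans two MSRs;
 * the second filter MSR sits 6 addresses above the first, hence the
 * pair of 32-bit writes to reg1->reg and reg1->reg + 6 above.
 */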

static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};

void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}

static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
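
/*
 * Because of that layout quirk, the IRP enable/disable/read helpers
 * below index these per-counter lookup tables by hwc->idx rather than
 * deriving the register from a base address at a fixed stride, as the
 * other PCI boxes do.
 */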
1518
ivbep_uncore_irp_enable_event(struct intel_uncore_box * box,struct perf_event * event)1519 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1520 {
1521 struct pci_dev *pdev = box->pci_dev;
1522 struct hw_perf_event *hwc = &event->hw;
1523
1524 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1525 hwc->config | SNBEP_PMON_CTL_EN);
1526 }
1527
ivbep_uncore_irp_disable_event(struct intel_uncore_box * box,struct perf_event * event)1528 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1529 {
1530 struct pci_dev *pdev = box->pci_dev;
1531 struct hw_perf_event *hwc = &event->hw;
1532
1533 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1534 }
1535
ivbep_uncore_irp_read_counter(struct intel_uncore_box * box,struct perf_event * event)1536 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1537 {
1538 struct pci_dev *pdev = box->pci_dev;
1539 struct hw_perf_event *hwc = &event->hw;
1540 u64 count = 0;
1541
1542 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1543 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1544
1545 return count;
1546 }
1547
1548 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1549 .init_box = ivbep_uncore_pci_init_box,
1550 .disable_box = snbep_uncore_pci_disable_box,
1551 .enable_box = snbep_uncore_pci_enable_box,
1552 .disable_event = ivbep_uncore_irp_disable_event,
1553 .enable_event = ivbep_uncore_irp_enable_event,
1554 .read_counter = ivbep_uncore_irp_read_counter,
1555 };
1556
1557 static struct intel_uncore_type ivbep_uncore_irp = {
1558 .name = "irp",
1559 .num_counters = 4,
1560 .num_boxes = 1,
1561 .perf_ctr_bits = 48,
1562 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1563 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1564 .ops = &ivbep_uncore_irp_ops,
1565 .format_group = &ivbep_uncore_format_group,
1566 };
1567
1568 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1569 .init_box = ivbep_uncore_pci_init_box,
1570 .disable_box = snbep_uncore_pci_disable_box,
1571 .enable_box = snbep_uncore_pci_enable_box,
1572 .disable_event = snbep_uncore_pci_disable_event,
1573 .enable_event = snbep_qpi_enable_event,
1574 .read_counter = snbep_uncore_pci_read_counter,
1575 .hw_config = snbep_qpi_hw_config,
1576 .get_constraint = uncore_get_constraint,
1577 .put_constraint = uncore_put_constraint,
1578 };
1579
1580 static struct intel_uncore_type ivbep_uncore_qpi = {
1581 .name = "qpi",
1582 .num_counters = 4,
1583 .num_boxes = 3,
1584 .perf_ctr_bits = 48,
1585 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1586 .event_ctl = SNBEP_PCI_PMON_CTL0,
1587 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1588 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1589 .num_shared_regs = 1,
1590 .ops = &ivbep_uncore_qpi_ops,
1591 .format_group = &ivbep_uncore_qpi_format_group,
1592 };
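/*
 * The QPI box borrows the generic shared-register machinery
 * (num_shared_regs = 1 plus the uncore_get/put_constraint callbacks)
 * to arbitrate its packet match/mask registers: only one event at a
 * time may own them.  Presumably, as in the SNB-EP code this reuses,
 * snbep_qpi_hw_config() stages the user-supplied config1/config2
 * values for snbep_qpi_enable_event() to write into the match/mask
 * registers of the companion filter device.
 */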
1593
1594 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1595 .name = "r2pcie",
1596 .num_counters = 4,
1597 .num_boxes = 1,
1598 .perf_ctr_bits = 44,
1599 .constraints = snbep_uncore_r2pcie_constraints,
1600 IVBEP_UNCORE_PCI_COMMON_INIT(),
1601 };
1602
1603 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1604 .name = "r3qpi",
1605 .num_counters = 3,
1606 .num_boxes = 2,
1607 .perf_ctr_bits = 44,
1608 .constraints = snbep_uncore_r3qpi_constraints,
1609 IVBEP_UNCORE_PCI_COMMON_INIT(),
1610 };
1611
1612 enum {
1613 IVBEP_PCI_UNCORE_HA,
1614 IVBEP_PCI_UNCORE_IMC,
1615 IVBEP_PCI_UNCORE_IRP,
1616 IVBEP_PCI_UNCORE_QPI,
1617 IVBEP_PCI_UNCORE_R2PCIE,
1618 IVBEP_PCI_UNCORE_R3QPI,
1619 };
1620
1621 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1622 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1623 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1624 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1625 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1626 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1627 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1628 NULL,
1629 };
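/*
 * Each id_table entry below packs two values into driver_data via
 * UNCORE_PCI_DEV_DATA(type, idx): the index into ivbep_pci_uncores[]
 * and the box number within that type.  This is how the generic probe
 * code maps a matched PCI device back to the right PMU box; the exact
 * bit packing lives in the shared uncore header.
 */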
1630
1631 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1632 { /* Home Agent 0 */
1633 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1634 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1635 },
1636 { /* Home Agent 1 */
1637 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1638 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1639 },
1640 { /* MC0 Channel 0 */
1641 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1642 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1643 },
1644 { /* MC0 Channel 1 */
1645 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1646 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1647 },
1648 { /* MC0 Channel 3 */
1649 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1650 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1651 },
1652 { /* MC0 Channel 4 */
1653 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1654 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1655 },
1656 { /* MC1 Channel 0 */
1657 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1658 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1659 },
1660 { /* MC1 Channel 1 */
1661 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1662 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1663 },
1664 { /* MC1 Channel 3 */
1665 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1666 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1667 },
1668 { /* MC1 Channel 4 */
1669 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1670 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1671 },
1672 { /* IRP */
1673 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1674 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1675 },
1676 { /* QPI0 Port 0 */
1677 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1678 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1679 },
1680 { /* QPI0 Port 1 */
1681 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1682 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
1683 },
1684 { /* QPI1 Port 2 */
1685 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1686 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
1687 },
1688 { /* R2PCIe */
1689 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1690 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
1691 },
1692 { /* R3QPI0 Link 0 */
1693 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1694 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
1695 },
1696 { /* R3QPI0 Link 1 */
1697 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1698 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
1699 },
1700 { /* R3QPI1 Link 2 */
1701 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1702 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
1703 },
1704 { /* QPI Port 0 filter */
1705 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1706 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1707 SNBEP_PCI_QPI_PORT0_FILTER),
1708 },
1709 { /* QPI Port 1 filter */
1710 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1711 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1712 SNBEP_PCI_QPI_PORT1_FILTER),
1713 },
1714 { /* end: all zeroes */ }
1715 };
1716
1717 static struct pci_driver ivbep_uncore_pci_driver = {
1718 .name = "ivbep_uncore",
1719 .id_table = ivbep_uncore_pci_ids,
1720 };
1721
1722 int ivbep_uncore_pci_init(void)
1723 {
1724 int ret = snbep_pci2phy_map_init(0x0e1e);
1725 if (ret)
1726 return ret;
1727 uncore_pci_uncores = ivbep_pci_uncores;
1728 uncore_pci_driver = &ivbep_uncore_pci_driver;
1729 return 0;
1730 }
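/*
 * 0x0e1e is the per-socket IvyTown device (by analogy with SNB-EP's
 * 0x3ce0, presumably the Ubox) that snbep_pci2phy_map_init() scans for;
 * it reads that device's node-ID configuration registers to build the
 * PCI-bus-number -> physical-package map the generic code later uses to
 * tie each PCI uncore box to its socket.
 */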
1731 /* end of IvyTown uncore support */
1732
1733 /* Haswell-EP uncore support */
1734 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
1735 &format_attr_event.attr,
1736 &format_attr_umask.attr,
1737 &format_attr_edge.attr,
1738 &format_attr_inv.attr,
1739 &format_attr_thresh5.attr,
1740 &format_attr_filter_tid2.attr,
1741 &format_attr_filter_cid.attr,
1742 NULL,
1743 };
1744
1745 static struct attribute_group hswep_uncore_ubox_format_group = {
1746 .name = "format",
1747 .attrs = hswep_uncore_ubox_formats_attr,
1748 };
1749
1750 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1751 {
1752 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1753 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
1754 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
1755 reg1->idx = 0;
1756 return 0;
1757 }
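/*
 * The Ubox has a single filter register rather than per-event choices:
 * hw_config above unconditionally stages config1 (masked to the valid
 * filter bits) into the event's extra_reg, and the shared-register
 * constraint helpers ensure that concurrently scheduled events agree
 * on the one filter MSR's contents.
 */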
1758
1759 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
1760 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1761 .hw_config = hswep_ubox_hw_config,
1762 .get_constraint = uncore_get_constraint,
1763 .put_constraint = uncore_put_constraint,
1764 };
1765
1766 static struct intel_uncore_type hswep_uncore_ubox = {
1767 .name = "ubox",
1768 .num_counters = 2,
1769 .num_boxes = 1,
1770 .perf_ctr_bits = 44,
1771 .fixed_ctr_bits = 48,
1772 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
1773 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
1774 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
1775 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
1776 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
1777 .num_shared_regs = 1,
1778 .ops = &hswep_uncore_ubox_ops,
1779 .format_group = &hswep_uncore_ubox_format_group,
1780 };
1781
1782 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
1783 &format_attr_event.attr,
1784 &format_attr_umask.attr,
1785 &format_attr_edge.attr,
1786 &format_attr_tid_en.attr,
1787 &format_attr_thresh8.attr,
1788 &format_attr_filter_tid3.attr,
1789 &format_attr_filter_link2.attr,
1790 &format_attr_filter_state3.attr,
1791 &format_attr_filter_nid2.attr,
1792 &format_attr_filter_opc2.attr,
1793 &format_attr_filter_nc.attr,
1794 &format_attr_filter_c6.attr,
1795 &format_attr_filter_isoc.attr,
1796 NULL,
1797 };
1798
1799 static struct attribute_group hswep_uncore_cbox_format_group = {
1800 .name = "format",
1801 .attrs = hswep_uncore_cbox_formats_attr,
1802 };
1803
1804 static struct event_constraint hswep_uncore_cbox_constraints[] = {
1805 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
1806 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
1807 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
1808 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
1809 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
1810 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
1811 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
1812 EVENT_CONSTRAINT_END
1813 };
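/*
 * The second argument of UNCORE_EVENT_CONSTRAINT() is a bitmask of the
 * counters the event is allowed to use: e.g. the 0x38 entry above
 * restricts that event to counters 0-1 (mask 0x3), while the 0x36
 * entry pins it to counter 0 only (mask 0x1).
 */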
1814
1815 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
1816 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1817 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1818 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1819 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1820 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1821 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1822 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
1823 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
1824 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1825 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
1826 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
1827 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
1828 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
1829 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
1830 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
1831 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1832 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1833 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1834 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1835 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1836 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1837 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1838 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1839 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1840 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1841 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1842 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1843 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1844 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1845 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1846 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1847 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1848 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1849 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1850 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1851 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1852 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1853 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1854 EVENT_EXTRA_END
1855 };
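/*
 * Each SNBEP_CBO_EVENT_EXTRA_REG(event, mask, fields) entry reads: if
 * (config & mask) == event, the event needs the filter fields named by
 * 'fields', encoded as the bit positions consumed by
 * hswep_cbox_filter_mask() below.  E.g. the (0x0334, 0xffff, 0x4)
 * entry says that event 0x34 with umask 0x03 requires the STATE
 * filter field.
 */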
1856
1857 static u64 hswep_cbox_filter_mask(int fields)
1858 {
1859 u64 mask = 0;
1860 if (fields & 0x1)
1861 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
1862 if (fields & 0x2)
1863 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1864 if (fields & 0x4)
1865 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1866 if (fields & 0x8)
1867 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
1868 if (fields & 0x10) {
1869 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1870 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
1871 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
1872 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1873 }
1874 return mask;
1875 }
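/*
 * For example, hswep_cbox_filter_mask(0x18) returns the NID mask (bit
 * 0x8) plus the whole opcode-match group (OPC | NC | C6 | ISOC, bit
 * 0x10), matching the 0x18 entries in the extra-register table above.
 */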
1876
1877 static struct event_constraint *
1878 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1879 {
1880 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
1881 }
1882
1883 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1884 {
1885 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1886 struct extra_reg *er;
1887 int idx = 0;
1888
1889 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
1890 if (er->event != (event->hw.config & er->config_mask))
1891 continue;
1892 idx |= er->idx;
1893 }
1894
1895 if (idx) {
1896 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1897 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1898 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
1899 reg1->idx = idx;
1900 }
1901 return 0;
1902 }
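/*
 * hw_config above ORs together the field bits of every extra-register
 * entry the event matches, then derives from that accumulated index
 * both the filter MSR address (FILTER0 of this particular box, at the
 * per-box MSR stride) and the subset of config1 bits that are valid.
 */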
1903
1904 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1905 struct perf_event *event)
1906 {
1907 struct hw_perf_event *hwc = &event->hw;
1908 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1909
1910 if (reg1->idx != EXTRA_REG_NONE) {
1911 u64 filter = uncore_shared_reg_config(box, 0);
1912 wrmsrl(reg1->reg, filter & 0xffffffff);
1913 wrmsrl(reg1->reg + 1, filter >> 32);
1914 }
1915
1916 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1917 }
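/*
 * The Haswell-EP Cbox splits its filter across two adjacent MSRs
 * (FILTER0/FILTER1), so the 64-bit shared-register value is written as
 * a low dword to reg1->reg and a high dword to reg1->reg + 1 before
 * the event itself is enabled.
 */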
1918
1919 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
1920 .init_box = snbep_uncore_msr_init_box,
1921 .disable_box = snbep_uncore_msr_disable_box,
1922 .enable_box = snbep_uncore_msr_enable_box,
1923 .disable_event = snbep_uncore_msr_disable_event,
1924 .enable_event = hswep_cbox_enable_event,
1925 .read_counter = uncore_msr_read_counter,
1926 .hw_config = hswep_cbox_hw_config,
1927 .get_constraint = hswep_cbox_get_constraint,
1928 .put_constraint = snbep_cbox_put_constraint,
1929 };
1930
1931 static struct intel_uncore_type hswep_uncore_cbox = {
1932 .name = "cbox",
1933 .num_counters = 4,
1934 .num_boxes = 18,
1935 .perf_ctr_bits = 48,
1936 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
1937 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
1938 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1939 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
1940 .msr_offset = HSWEP_CBO_MSR_OFFSET,
1941 .num_shared_regs = 1,
1942 .constraints = hswep_uncore_cbox_constraints,
1943 .ops = &hswep_uncore_cbox_ops,
1944 .format_group = &hswep_uncore_cbox_format_group,
1945 };
1946
1947 /*
1948 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
1949 */
1950 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
1951 {
1952 unsigned msr = uncore_msr_box_ctl(box);
1953
1954 if (msr) {
1955 u64 init = SNBEP_PMON_BOX_CTL_INT;
1956 u64 flags = 0;
1957 int i;
1958
1959 for_each_set_bit(i, (unsigned long *)&init, 64) {
1960 flags |= (1ULL << i);
1961 wrmsrl(msr, flags);
1962 }
1963 }
1964 }
1965
1966 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
1967 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1968 .init_box = hswep_uncore_sbox_msr_init_box
1969 };
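/*
 * __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() is the variant of the common
 * MSR ops initializer that leaves .init_box unset, which is what lets
 * the SBOX substitute the bit-by-bit initialization above.
 */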
1970
1971 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
1972 &format_attr_event.attr,
1973 &format_attr_umask.attr,
1974 &format_attr_edge.attr,
1975 &format_attr_tid_en.attr,
1976 &format_attr_inv.attr,
1977 &format_attr_thresh8.attr,
1978 NULL,
1979 };
1980
1981 static struct attribute_group hswep_uncore_sbox_format_group = {
1982 .name = "format",
1983 .attrs = hswep_uncore_sbox_formats_attr,
1984 };
1985
1986 static struct intel_uncore_type hswep_uncore_sbox = {
1987 .name = "sbox",
1988 .num_counters = 4,
1989 .num_boxes = 4,
1990 .perf_ctr_bits = 44,
1991 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
1992 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
1993 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
1994 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
1995 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
1996 .ops = &hswep_uncore_sbox_msr_ops,
1997 .format_group = &hswep_uncore_sbox_format_group,
1998 };
1999
2000 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2001 {
2002 struct hw_perf_event *hwc = &event->hw;
2003 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2004 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2005
2006 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2007 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2008 reg1->idx = ev_sel - 0xb;
2009 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2010 }
2011 return 0;
2012 }
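/*
 * Event selects 0xb-0xe are the four PCU frequency-band cycle events;
 * each stages its band threshold from config1 into the extra_reg so
 * the common enable path can program the PCU filter MSR, which holds
 * all four band cutoffs.
 */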
2013
2014 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2015 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2016 .hw_config = hswep_pcu_hw_config,
2017 .get_constraint = snbep_pcu_get_constraint,
2018 .put_constraint = snbep_pcu_put_constraint,
2019 };
2020
2021 static struct intel_uncore_type hswep_uncore_pcu = {
2022 .name = "pcu",
2023 .num_counters = 4,
2024 .num_boxes = 1,
2025 .perf_ctr_bits = 48,
2026 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2027 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2028 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2029 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2030 .num_shared_regs = 1,
2031 .ops = &hswep_uncore_pcu_ops,
2032 .format_group = &snbep_uncore_pcu_format_group,
2033 };
2034
2035 static struct intel_uncore_type *hswep_msr_uncores[] = {
2036 &hswep_uncore_ubox,
2037 &hswep_uncore_cbox,
2038 &hswep_uncore_sbox,
2039 &hswep_uncore_pcu,
2040 NULL,
2041 };
2042
2043 void hswep_uncore_cpu_init(void)
2044 {
2045 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2046 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2047
2048 /* Detect 6-8 core systems with only two SBOXes */
2049 if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
2050 u32 capid4;
2051
2052 pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
2053 0x94, &capid4);
2054 if (((capid4 >> 6) & 0x3) == 0)
2055 hswep_uncore_sbox.num_boxes = 2;
2056 }
2057
2058 uncore_msr_uncores = hswep_msr_uncores;
2059 }
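/*
 * The CAPID4 probe above relies on the extra PCU.3 device captured by
 * the id table below: bits 7:6 of the capability register at config
 * offset 0x94 distinguish the die variants, and a value of 0 indicates
 * a part that physically has only two SBOXes.  Without the check,
 * four PMUs would be registered for boxes that do not exist.
 */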
2060
2061 static struct intel_uncore_type hswep_uncore_ha = {
2062 .name = "ha",
2063 .num_counters = 5,
2064 .num_boxes = 2,
2065 .perf_ctr_bits = 48,
2066 SNBEP_UNCORE_PCI_COMMON_INIT(),
2067 };
2068
2069 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2070 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2071 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2072 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2073 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2074 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2075 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2076 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2077 { /* end: all zeroes */ },
2078 };
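/*
 * The cas_count scale is 64 / 2^20 = 6.103515625e-5: each CAS event
 * corresponds to one 64-byte cache-line transfer, and dividing by 2^20
 * converts bytes to the advertised MiB unit, letting perf report
 * memory bandwidth directly from the raw counts.
 */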
2079
2080 static struct intel_uncore_type hswep_uncore_imc = {
2081 .name = "imc",
2082 .num_counters = 5,
2083 .num_boxes = 8,
2084 .perf_ctr_bits = 48,
2085 .fixed_ctr_bits = 48,
2086 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2087 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2088 .event_descs = hswep_uncore_imc_events,
2089 SNBEP_UNCORE_PCI_COMMON_INIT(),
2090 };
2091
2092 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
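/*
 * Haswell-EP reuses the IvyTown IRP enable/disable callbacks (and thus
 * its control-register table) but needs its own counter offsets, which
 * here happen to be evenly spaced at 0xa0 + 8 * idx; the lookup table
 * is presumably kept so the read path stays identical to the IVB one.
 */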
2093
2094 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2095 {
2096 struct pci_dev *pdev = box->pci_dev;
2097 struct hw_perf_event *hwc = &event->hw;
2098 u64 count = 0;
2099
2100 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2101 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2102
2103 return count;
2104 }
2105
2106 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2107 .init_box = snbep_uncore_pci_init_box,
2108 .disable_box = snbep_uncore_pci_disable_box,
2109 .enable_box = snbep_uncore_pci_enable_box,
2110 .disable_event = ivbep_uncore_irp_disable_event,
2111 .enable_event = ivbep_uncore_irp_enable_event,
2112 .read_counter = hswep_uncore_irp_read_counter,
2113 };
2114
2115 static struct intel_uncore_type hswep_uncore_irp = {
2116 .name = "irp",
2117 .num_counters = 4,
2118 .num_boxes = 1,
2119 .perf_ctr_bits = 48,
2120 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2121 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2122 .ops = &hswep_uncore_irp_ops,
2123 .format_group = &snbep_uncore_format_group,
2124 };
2125
2126 static struct intel_uncore_type hswep_uncore_qpi = {
2127 .name = "qpi",
2128 .num_counters = 5,
2129 .num_boxes = 3,
2130 .perf_ctr_bits = 48,
2131 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2132 .event_ctl = SNBEP_PCI_PMON_CTL0,
2133 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2134 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2135 .num_shared_regs = 1,
2136 .ops = &snbep_uncore_qpi_ops,
2137 .format_group = &snbep_uncore_qpi_format_group,
2138 };
2139
2140 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2141 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2142 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2143 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2144 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2145 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2146 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2147 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2148 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2149 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2150 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2151 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2152 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2153 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2154 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2155 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2156 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2157 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2158 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2159 EVENT_CONSTRAINT_END
2160 };
2161
2162 static struct intel_uncore_type hswep_uncore_r2pcie = {
2163 .name = "r2pcie",
2164 .num_counters = 4,
2165 .num_boxes = 1,
2166 .perf_ctr_bits = 48,
2167 .constraints = hswep_uncore_r2pcie_constraints,
2168 SNBEP_UNCORE_PCI_COMMON_INIT(),
2169 };
2170
2171 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2172 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2173 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2174 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2175 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2176 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2177 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2178 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2179 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2180 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2181 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2182 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2183 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2184 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2185 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2186 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2187 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2188 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2189 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2190 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2191 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2192 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2193 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2194 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2195 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2196 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2197 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2198 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2199 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2200 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2201 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2202 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2203 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2204 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2205 EVENT_CONSTRAINT_END
2206 };
2207
2208 static struct intel_uncore_type hswep_uncore_r3qpi = {
2209 .name = "r3qpi",
2210 .num_counters = 4,
2211 .num_boxes = 3,
2212 .perf_ctr_bits = 44,
2213 .constraints = hswep_uncore_r3qpi_constraints,
2214 SNBEP_UNCORE_PCI_COMMON_INIT(),
2215 };
2216
2217 enum {
2218 HSWEP_PCI_UNCORE_HA,
2219 HSWEP_PCI_UNCORE_IMC,
2220 HSWEP_PCI_UNCORE_IRP,
2221 HSWEP_PCI_UNCORE_QPI,
2222 HSWEP_PCI_UNCORE_R2PCIE,
2223 HSWEP_PCI_UNCORE_R3QPI,
2224 };
2225
2226 static struct intel_uncore_type *hswep_pci_uncores[] = {
2227 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
2228 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
2229 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
2230 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
2231 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
2232 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
2233 NULL,
2234 };
2235
2236 static const struct pci_device_id hswep_uncore_pci_ids[] = {
2237 { /* Home Agent 0 */
2238 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
2239 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
2240 },
2241 { /* Home Agent 1 */
2242 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
2243 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
2244 },
2245 { /* MC0 Channel 0 */
2246 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
2247 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
2248 },
2249 { /* MC0 Channel 1 */
2250 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
2251 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
2252 },
2253 { /* MC0 Channel 2 */
2254 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
2255 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
2256 },
2257 { /* MC0 Channel 3 */
2258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
2259 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
2260 },
2261 { /* MC1 Channel 0 */
2262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
2263 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
2264 },
2265 { /* MC1 Channel 1 */
2266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
2267 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
2268 },
2269 { /* MC1 Channel 2 */
2270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
2271 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
2272 },
2273 { /* MC1 Channel 3 */
2274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
2275 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
2276 },
2277 { /* IRP */
2278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
2279 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
2280 },
2281 { /* QPI0 Port 0 */
2282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
2283 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
2284 },
2285 { /* QPI0 Port 1 */
2286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
2287 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
2288 },
2289 { /* QPI1 Port 2 */
2290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
2291 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
2292 },
2293 { /* R2PCIe */
2294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
2295 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
2296 },
2297 { /* R3QPI0 Link 0 */
2298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
2299 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
2300 },
2301 { /* R3QPI0 Link 1 */
2302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
2303 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
2304 },
2305 { /* R3QPI1 Link 2 */
2306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
2307 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
2308 },
2309 { /* QPI Port 0 filter */
2310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
2311 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2312 SNBEP_PCI_QPI_PORT0_FILTER),
2313 },
2314 { /* QPI Port 1 filter */
2315 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
2316 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2317 SNBEP_PCI_QPI_PORT1_FILTER),
2318 },
2319 { /* PCU.3 (for Capability registers) */
2320 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
2321 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2322 HSWEP_PCI_PCU_3),
2323 },
2324 { /* end: all zeroes */ }
2325 };
2326
2327 static struct pci_driver hswep_uncore_pci_driver = {
2328 .name = "hswep_uncore",
2329 .id_table = hswep_uncore_pci_ids,
2330 };
2331
2332 int hswep_uncore_pci_init(void)
2333 {
2334 int ret = snbep_pci2phy_map_init(0x2f1e);
2335 if (ret)
2336 return ret;
2337 uncore_pci_uncores = hswep_pci_uncores;
2338 uncore_pci_driver = &hswep_uncore_pci_driver;
2339 return 0;
2340 }
2341 /* end of Haswell-EP uncore support */
2342
2343 /* BDX-DE uncore support */
2344
2345 static struct intel_uncore_type bdx_uncore_ubox = {
2346 .name = "ubox",
2347 .num_counters = 2,
2348 .num_boxes = 1,
2349 .perf_ctr_bits = 48,
2350 .fixed_ctr_bits = 48,
2351 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2352 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2353 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2354 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2355 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2356 .num_shared_regs = 1,
2357 .ops = &ivbep_uncore_msr_ops,
2358 .format_group = &ivbep_uncore_ubox_format_group,
2359 };
2360
2361 static struct event_constraint bdx_uncore_cbox_constraints[] = {
2362 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
2363 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2364 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2365 EVENT_CONSTRAINT_END
2366 };
2367
2368 static struct intel_uncore_type bdx_uncore_cbox = {
2369 .name = "cbox",
2370 .num_counters = 4,
2371 .num_boxes = 8,
2372 .perf_ctr_bits = 48,
2373 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2374 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2375 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2376 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2377 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2378 .num_shared_regs = 1,
2379 .constraints = bdx_uncore_cbox_constraints,
2380 .ops = &hswep_uncore_cbox_ops,
2381 .format_group = &hswep_uncore_cbox_format_group,
2382 };
2383
2384 static struct intel_uncore_type *bdx_msr_uncores[] = {
2385 &bdx_uncore_ubox,
2386 &bdx_uncore_cbox,
2387 &hswep_uncore_pcu,
2388 NULL,
2389 };
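/*
 * BDX-DE (Broadwell-DE) is a single-socket part without QPI links, so
 * the MSR uncore list carries no SBOX and the PCI lists below omit the
 * QPI and R3QPI boxes; the PCU type is shared with Haswell-EP
 * unchanged.
 */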
2390
2391 void bdx_uncore_cpu_init(void)
2392 {
2393 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2394 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2395 uncore_msr_uncores = bdx_msr_uncores;
2396 }
2397
2398 static struct intel_uncore_type bdx_uncore_ha = {
2399 .name = "ha",
2400 .num_counters = 4,
2401 .num_boxes = 1,
2402 .perf_ctr_bits = 48,
2403 SNBEP_UNCORE_PCI_COMMON_INIT(),
2404 };
2405
2406 static struct intel_uncore_type bdx_uncore_imc = {
2407 .name = "imc",
2408 .num_counters = 5,
2409 .num_boxes = 2,
2410 .perf_ctr_bits = 48,
2411 .fixed_ctr_bits = 48,
2412 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2413 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2414 .event_descs = hswep_uncore_imc_events,
2415 SNBEP_UNCORE_PCI_COMMON_INIT(),
2416 };
2417
2418 static struct intel_uncore_type bdx_uncore_irp = {
2419 .name = "irp",
2420 .num_counters = 4,
2421 .num_boxes = 1,
2422 .perf_ctr_bits = 48,
2423 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2424 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2425 .ops = &hswep_uncore_irp_ops,
2426 .format_group = &snbep_uncore_format_group,
2427 };
2428
2429
2430 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
2431 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2432 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2433 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2434 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2435 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2436 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2437 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2438 EVENT_CONSTRAINT_END
2439 };
2440
2441 static struct intel_uncore_type bdx_uncore_r2pcie = {
2442 .name = "r2pcie",
2443 .num_counters = 4,
2444 .num_boxes = 1,
2445 .perf_ctr_bits = 48,
2446 .constraints = bdx_uncore_r2pcie_constraints,
2447 SNBEP_UNCORE_PCI_COMMON_INIT(),
2448 };
2449
2450 enum {
2451 BDX_PCI_UNCORE_HA,
2452 BDX_PCI_UNCORE_IMC,
2453 BDX_PCI_UNCORE_IRP,
2454 BDX_PCI_UNCORE_R2PCIE,
2455 };
2456
2457 static struct intel_uncore_type *bdx_pci_uncores[] = {
2458 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
2459 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
2460 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
2461 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
2462 NULL,
2463 };
2464
2465 static const struct pci_device_id bdx_uncore_pci_ids[] = {
2466 { /* Home Agent 0 */
2467 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
2468 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
2469 },
2470 { /* MC0 Channel 0 */
2471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
2472 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
2473 },
2474 { /* MC0 Channel 1 */
2475 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
2476 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
2477 },
2478 { /* IRP */
2479 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
2480 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
2481 },
2482 { /* R2PCIe */
2483 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
2484 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
2485 },
2486 { /* end: all zeroes */ }
2487 };
2488
2489 static struct pci_driver bdx_uncore_pci_driver = {
2490 .name = "bdx_uncore",
2491 .id_table = bdx_uncore_pci_ids,
2492 };
2493
2494 int bdx_uncore_pci_init(void)
2495 {
2496 int ret = snbep_pci2phy_map_init(0x6f1e);
2497
2498 if (ret)
2499 return ret;
2500 uncore_pci_uncores = bdx_pci_uncores;
2501 uncore_pci_driver = &bdx_uncore_pci_driver;
2502 return 0;
2503 }
2504
2505 /* end of BDX-DE uncore support */
2506