// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID 0x40
#define SNBEP_GIDNIDMAP 0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
				SNBEP_PMON_BOX_CTL_RST_CTRS | \
				SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
#define SNBEP_PMON_CTL_RST (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
#define SNBEP_PMON_CTL_EN (1 << 22)
#define SNBEP_PMON_CTL_INVERT (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
				   SNBEP_PMON_CTL_UMASK_MASK | \
				   SNBEP_PMON_CTL_EDGE_DET | \
				   SNBEP_PMON_CTL_INVERT | \
				   SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
					   SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL 0xf4
#define SNBEP_PCI_PMON_CTL0 0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0 0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0 0xc16
#define SNBEP_U_MSR_PMON_CTL0 0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0 0xd16
#define SNBEP_C0_MSR_PMON_CTL0 0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
#define SNBEP_CBO_MSR_OFFSET 0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000

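/*
 * Template for Cbox filter extra_reg entries: an event whose encoding
 * matches (e) under mask (m) needs the filter MSR, claiming the filter
 * field bitmap (i) (see snbep_cbox_filter_mask()).
 */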
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
				SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
				   SNBEP_PMON_CTL_UMASK_MASK | \
				   SNBEP_PMON_CTL_EDGE_DET | \
				   SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
					   SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

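/* Extract the i-th n-bit wide field of x, e.g. one refcount lane of er->ref. */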
#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0 0x709
#define HSWEP_U_MSR_PMON_CTL0 0x705
#define HSWEP_U_MSR_PMON_FILTER 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
				(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
				 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0 0xe08
#define HSWEP_C0_MSR_PMON_CTL0 0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
#define HSWEP_CBO_MSR_OFFSET 0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0 0x726
#define HSWEP_S0_MSR_PMON_CTL0 0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
#define HSWEP_SBOX_MSR_OFFSET 0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0 0x717
#define HSWEP_PCU_MSR_PMON_CTL0 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
				 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET 0xc
#define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
				(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
				 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
#define KNL_UCLK_MSR_PMON_CTL0 0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
#define KNL_PMON_FIXED_CTL_EN 0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID 0xc0
#define SKX_GIDNIDMAP 0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  | Default | Description
 * | [63]  |   00h   | VALID - When set, indicates the CPU bus
 *                     numbers have been initialized. (RO)
 * |[62:48]|   ---   | Reserved
 * |[47:40]|   00h   | BUS_NUM_5 - Return the bus number BIOS assigned
 *                     CPUBUSNO(5). (RO)
 * |[39:32]|   00h   | BUS_NUM_4 - Return the bus number BIOS assigned
 *                     CPUBUSNO(4). (RO)
 * |[31:24]|   00h   | BUS_NUM_3 - Return the bus number BIOS assigned
 *                     CPUBUSNO(3). (RO)
 * |[23:16]|   00h   | BUS_NUM_2 - Return the bus number BIOS assigned
 *                     CPUBUSNO(2). (RO)
 * |[15:8] |   00h   | BUS_NUM_1 - Return the bus number BIOS assigned
 *                     CPUBUSNO(1). (RO)
 * | [7:0] |   00h   | BUS_NUM_0 - Return the bus number BIOS assigned
 *                     CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER 0x300
#define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
#define BUS_NUM_STRIDE 8

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0 0xa48
#define SKX_IIO0_MSR_PMON_CTR0 0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
#define SKX_IIO_MSR_OFFSET 0x20

#define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
#define SKX_PMON_CTL_CH_MASK (0xff << 4)
#define SKX_PMON_CTL_FC_MASK (0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
				     SNBEP_PMON_CTL_UMASK_MASK | \
				     SNBEP_PMON_CTL_EDGE_DET | \
				     SNBEP_PMON_CTL_INVERT | \
				     SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0 0xa5b
#define SKX_IRP0_MSR_PMON_CTR0 0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
#define SKX_IRP_MSR_OFFSET 0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0 0x350
#define SKX_UPI_PCI_PMON_CTR0 0x318
#define SKX_UPI_PCI_PMON_BOX_CTL 0x378
#define SKX_UPI_CTL_UMASK_EXT 0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0 0x228
#define SKX_M2M_PCI_PMON_CTR0 0x200
#define SKX_M2M_PCI_PMON_BOX_CTL 0x258

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0 0x1f98
#define SNR_U_MSR_PMON_CTL0 0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0 0x1c01
#define SNR_CHA_MSR_PMON_CTR0 0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0 0x1e08
#define SNR_IIO_MSR_PMON_CTR0 0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
#define SNR_IIO_MSR_OFFSET 0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
#define SNR_IRP_MSR_OFFSET 0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
#define SNR_M2PCIE_MSR_OFFSET 0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0 0x1ef1
#define SNR_PCU_MSR_PMON_CTR0 0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0 0x468
#define SNR_M2M_PCI_PMON_CTR0 0x440
#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0 0x508
#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
#define SNR_IMC_MMIO_PMON_CTL0 0x40
#define SNR_IMC_MMIO_PMON_CTR0 0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
#define SNR_IMC_MMIO_OFFSET 0x4000
#define SNR_IMC_MMIO_SIZE 0x4000
#define SNR_IMC_MMIO_BASE_OFFSET 0xd0
#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
#define SNR_IMC_MMIO_MEM0_MASK 0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0 0xb68
#define ICX_C34_MSR_PMON_CTL0 0xb61
#define ICX_C34_MSR_PMON_BOX_CTL 0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0 0xa58
#define ICX_IIO_MSR_PMON_CTR0 0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL 0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0 0xa4d
#define ICX_IRP0_MSR_PMON_CTR0 0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0 0x350
#define ICX_UPI_PCI_PMON_CTR0 0x320
#define ICX_UPI_PCI_PMON_BOX_CTL 0x318
#define ICX_UPI_CTL_UMASK_EXT 0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0 0xd8
#define ICX_M3UPI_PCI_PMON_CTR0 0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN 3
#define ICX_IMC_MEM_STRIDE 0x4

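/*
 * Sysfs "format" attributes: each entry names an event field and the
 * config/config1/config2 bit range it occupies, so perf tooling can
 * assemble raw event encodings symbolically.
 */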
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

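/*
 * Box-level control: counting in a box is stopped and restarted by
 * toggling the FRZ (freeze) bit in the per-box control register; init
 * writes the reset bits so counting starts from a clean state.
 */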
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

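/*
 * PMON counters are wider than 32 bits but PCI config space is accessed
 * one dword at a time, so the low and high halves are read back to back
 * into a single u64.
 */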
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

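/*
 * The common op tables are stamped out via macros so that later platform
 * variants can reuse them, overriding only the hooks that differ.
 */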
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNBEP_U_MSR_PMON_CTR0,
	.event_ctl = SNBEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &snbep_uncore_msr_ops,
	.format_group = &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

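/*
 * All events in a Cbox share a single filter register. er->ref packs one
 * 6-bit reference count per filter field (TID/NID/STATE/OPC); a field can
 * be claimed either while it is unused or when the new event programs the
 * same filter value, otherwise the event gets the empty constraint.
 */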
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snbep_cbox_hw_config,
	.get_constraint = snbep_cbox_get_constraint,
	.put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 44,
	.event_ctl = SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
	.event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = snbep_uncore_cbox_constraints,
	.ops = &snbep_uncore_cbox_ops,
	.format_group = &snbep_uncore_cbox_format_group,
};

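/*
 * The PCU filter packs four 8-bit band thresholds into one MSR.
 * snbep_pcu_alter_er() shifts an event's threshold byte into a different
 * lane when the event has to move to another band index.
 */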
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

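/*
 * Event selects 0xb-0xe are the band occupancy events; each one maps to
 * byte lane (ev_sel - 0xb) of the PCU filter MSR.
 */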
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snbep_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snbep_uncore_pcu_ops,
	.format_group = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

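/*
 * QPI event 0x38 counts packets matching the match/mask registers, which
 * live on a separate filter PCI device that is looked up per die through
 * uncore_extra_pci_dev[].
 */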
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
					       (u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
					       (u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
					       (u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
					       (u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event = snbep_qpi_enable_event,
	.hw_config = snbep_qpi_hw_config,
	.get_constraint = uncore_get_constraint,
	.put_constraint = uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name = "ha",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 4,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs = snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name = "qpi",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snbep_uncore_qpi_ops,
	.event_descs = snbep_uncore_qpi_events,
	.format_group = &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name = "r2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name = "r3qpi",
	.num_counters = 3,
	.num_boxes = 2,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name = "snbep_uncore",
	.id_table = snbep_uncore_pci_ids,
};

#define NODE_ID_MASK 0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1435
snbep_uncore_pci_init(void)1436 int snbep_uncore_pci_init(void)
1437 {
1438 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1439 if (ret)
1440 return ret;
1441 uncore_pci_uncores = snbep_pci_uncores;
1442 uncore_pci_driver = &snbep_uncore_pci_driver;
1443 return 0;
1444 }
1445 /* end of Sandy Bridge-EP uncore support */
1446
1447 /* IvyTown uncore support */
ivbep_uncore_msr_init_box(struct intel_uncore_box * box)1448 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1449 {
1450 unsigned msr = uncore_msr_box_ctl(box);
1451 if (msr)
1452 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1453 }
1454
ivbep_uncore_pci_init_box(struct intel_uncore_box * box)1455 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1456 {
1457 struct pci_dev *pdev = box->pci_dev;
1458
1459 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1460 }
1461
1462 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1463 .init_box = ivbep_uncore_msr_init_box, \
1464 .disable_box = snbep_uncore_msr_disable_box, \
1465 .enable_box = snbep_uncore_msr_enable_box, \
1466 .disable_event = snbep_uncore_msr_disable_event, \
1467 .enable_event = snbep_uncore_msr_enable_event, \
1468 .read_counter = uncore_msr_read_counter
1469
1470 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1471 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1472 };
1473
1474 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1475 .init_box = ivbep_uncore_pci_init_box,
1476 .disable_box = snbep_uncore_pci_disable_box,
1477 .enable_box = snbep_uncore_pci_enable_box,
1478 .disable_event = snbep_uncore_pci_disable_event,
1479 .enable_event = snbep_uncore_pci_enable_event,
1480 .read_counter = snbep_uncore_pci_read_counter,
1481 };
1482
1483 #define IVBEP_UNCORE_PCI_COMMON_INIT() \
1484 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1485 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1486 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
1487 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1488 .ops = &ivbep_uncore_pci_ops, \
1489 .format_group = &ivbep_uncore_format_group
1490
1491 static struct attribute *ivbep_uncore_formats_attr[] = {
1492 &format_attr_event.attr,
1493 &format_attr_umask.attr,
1494 &format_attr_edge.attr,
1495 &format_attr_inv.attr,
1496 &format_attr_thresh8.attr,
1497 NULL,
1498 };
1499
1500 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1501 &format_attr_event.attr,
1502 &format_attr_umask.attr,
1503 &format_attr_edge.attr,
1504 &format_attr_inv.attr,
1505 &format_attr_thresh5.attr,
1506 NULL,
1507 };
1508
1509 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1510 &format_attr_event.attr,
1511 &format_attr_umask.attr,
1512 &format_attr_edge.attr,
1513 &format_attr_tid_en.attr,
1514 &format_attr_thresh8.attr,
1515 &format_attr_filter_tid.attr,
1516 &format_attr_filter_link.attr,
1517 &format_attr_filter_state2.attr,
1518 &format_attr_filter_nid2.attr,
1519 &format_attr_filter_opc2.attr,
1520 &format_attr_filter_nc.attr,
1521 &format_attr_filter_c6.attr,
1522 &format_attr_filter_isoc.attr,
1523 NULL,
1524 };
1525
1526 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1527 &format_attr_event.attr,
1528 &format_attr_occ_sel.attr,
1529 &format_attr_edge.attr,
1530 &format_attr_thresh5.attr,
1531 &format_attr_occ_invert.attr,
1532 &format_attr_occ_edge.attr,
1533 &format_attr_filter_band0.attr,
1534 &format_attr_filter_band1.attr,
1535 &format_attr_filter_band2.attr,
1536 &format_attr_filter_band3.attr,
1537 NULL,
1538 };
1539
1540 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1541 &format_attr_event_ext.attr,
1542 &format_attr_umask.attr,
1543 &format_attr_edge.attr,
1544 &format_attr_thresh8.attr,
1545 &format_attr_match_rds.attr,
1546 &format_attr_match_rnid30.attr,
1547 &format_attr_match_rnid4.attr,
1548 &format_attr_match_dnid.attr,
1549 &format_attr_match_mc.attr,
1550 &format_attr_match_opc.attr,
1551 &format_attr_match_vnw.attr,
1552 &format_attr_match0.attr,
1553 &format_attr_match1.attr,
1554 &format_attr_mask_rds.attr,
1555 &format_attr_mask_rnid30.attr,
1556 &format_attr_mask_rnid4.attr,
1557 &format_attr_mask_dnid.attr,
1558 &format_attr_mask_mc.attr,
1559 &format_attr_mask_opc.attr,
1560 &format_attr_mask_vnw.attr,
1561 &format_attr_mask0.attr,
1562 &format_attr_mask1.attr,
1563 NULL,
1564 };
1565
1566 static const struct attribute_group ivbep_uncore_format_group = {
1567 .name = "format",
1568 .attrs = ivbep_uncore_formats_attr,
1569 };
1570
1571 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1572 .name = "format",
1573 .attrs = ivbep_uncore_ubox_formats_attr,
1574 };
1575
1576 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1577 .name = "format",
1578 .attrs = ivbep_uncore_cbox_formats_attr,
1579 };
1580
1581 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1582 .name = "format",
1583 .attrs = ivbep_uncore_pcu_formats_attr,
1584 };
1585
1586 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1587 .name = "format",
1588 .attrs = ivbep_uncore_qpi_formats_attr,
1589 };
1590
1591 static struct intel_uncore_type ivbep_uncore_ubox = {
1592 .name = "ubox",
1593 .num_counters = 2,
1594 .num_boxes = 1,
1595 .perf_ctr_bits = 44,
1596 .fixed_ctr_bits = 48,
1597 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1598 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1599 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1600 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1601 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1602 .ops = &ivbep_uncore_msr_ops,
1603 .format_group = &ivbep_uncore_ubox_format_group,
1604 };
1605
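/*
 * Each entry below matches on the event+umask bits (under the mask given as
 * the second argument); the idx value records which Cbox filter fields the
 * event needs, using the bit encoding decoded by ivbep_cbox_filter_mask()
 * further down.
 */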
1606 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1607 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1608 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1609 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1610 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1611 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1612 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1613 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1614 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1615 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1616 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1617 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1618 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1619 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1620 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1621 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1622 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1623 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1624 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1625 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1626 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1627 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1628 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1629 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1630 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1631 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1632 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1633 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1634 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1635 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1636 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1637 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1638 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1639 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1640 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1641 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1642 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1643 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1644 EVENT_EXTRA_END
1645 };
1646
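/*
 * 'fields' is a bitmap of required filter fields: bit 0 selects TID,
 * bit 1 LINK, bit 2 STATE, bit 3 NID, and bit 4 the OPC/NC/C6/ISOC group.
 */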
1647 static u64 ivbep_cbox_filter_mask(int fields)
1648 {
1649 u64 mask = 0;
1650
1651 if (fields & 0x1)
1652 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1653 if (fields & 0x2)
1654 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1655 if (fields & 0x4)
1656 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1657 if (fields & 0x8)
1658 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1659 if (fields & 0x10) {
1660 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1661 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1662 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1663 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1664 }
1665
1666 return mask;
1667 }
1668
1669 static struct event_constraint *
1670 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1671 {
1672 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1673 }
1674
1675 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1676 {
1677 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1678 struct extra_reg *er;
1679 int idx = 0;
1680
1681 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1682 if (er->event != (event->hw.config & er->config_mask))
1683 continue;
1684 idx |= er->idx;
1685 }
1686
1687 if (idx) {
1688 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1689 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1690 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1691 reg1->idx = idx;
1692 }
1693 return 0;
1694 }
1695
1696 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1697 {
1698 struct hw_perf_event *hwc = &event->hw;
1699 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1700
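/*
 * The Cbox filter value is split across two MSRs on IVB-EP; the second
 * filter register sits at an MSR offset of +6 from the first, hence the
 * separate low/high dword writes below.
 */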
1701 if (reg1->idx != EXTRA_REG_NONE) {
1702 u64 filter = uncore_shared_reg_config(box, 0);
1703 wrmsrl(reg1->reg, filter & 0xffffffff);
1704 wrmsrl(reg1->reg + 6, filter >> 32);
1705 }
1706
1707 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1708 }
1709
1710 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1711 .init_box = ivbep_uncore_msr_init_box,
1712 .disable_box = snbep_uncore_msr_disable_box,
1713 .enable_box = snbep_uncore_msr_enable_box,
1714 .disable_event = snbep_uncore_msr_disable_event,
1715 .enable_event = ivbep_cbox_enable_event,
1716 .read_counter = uncore_msr_read_counter,
1717 .hw_config = ivbep_cbox_hw_config,
1718 .get_constraint = ivbep_cbox_get_constraint,
1719 .put_constraint = snbep_cbox_put_constraint,
1720 };
1721
1722 static struct intel_uncore_type ivbep_uncore_cbox = {
1723 .name = "cbox",
1724 .num_counters = 4,
1725 .num_boxes = 15,
1726 .perf_ctr_bits = 44,
1727 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1728 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1729 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1730 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1731 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1732 .num_shared_regs = 1,
1733 .constraints = snbep_uncore_cbox_constraints,
1734 .ops = &ivbep_uncore_cbox_ops,
1735 .format_group = &ivbep_uncore_cbox_format_group,
1736 };
1737
1738 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1739 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1740 .hw_config = snbep_pcu_hw_config,
1741 .get_constraint = snbep_pcu_get_constraint,
1742 .put_constraint = snbep_pcu_put_constraint,
1743 };
1744
1745 static struct intel_uncore_type ivbep_uncore_pcu = {
1746 .name = "pcu",
1747 .num_counters = 4,
1748 .num_boxes = 1,
1749 .perf_ctr_bits = 48,
1750 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1751 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1752 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1753 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1754 .num_shared_regs = 1,
1755 .ops = &ivbep_uncore_pcu_ops,
1756 .format_group = &ivbep_uncore_pcu_format_group,
1757 };
1758
1759 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1760 &ivbep_uncore_ubox,
1761 &ivbep_uncore_cbox,
1762 &ivbep_uncore_pcu,
1763 NULL,
1764 };
1765
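/*
 * Each Cbox is paired with a core, so never expose more Cboxes than the
 * part has cores.
 */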
1766 void ivbep_uncore_cpu_init(void)
1767 {
1768 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1769 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1770 uncore_msr_uncores = ivbep_msr_uncores;
1771 }
1772
1773 static struct intel_uncore_type ivbep_uncore_ha = {
1774 .name = "ha",
1775 .num_counters = 4,
1776 .num_boxes = 2,
1777 .perf_ctr_bits = 48,
1778 IVBEP_UNCORE_PCI_COMMON_INIT(),
1779 };
1780
1781 static struct intel_uncore_type ivbep_uncore_imc = {
1782 .name = "imc",
1783 .num_counters = 4,
1784 .num_boxes = 8,
1785 .perf_ctr_bits = 48,
1786 .fixed_ctr_bits = 48,
1787 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1788 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1789 .event_descs = snbep_uncore_imc_events,
1790 IVBEP_UNCORE_PCI_COMMON_INIT(),
1791 };
1792
1793 /* Registers in IRP boxes are not evenly spaced; use per-counter offset tables */
1794 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1795 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1796
1797 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1798 {
1799 struct pci_dev *pdev = box->pci_dev;
1800 struct hw_perf_event *hwc = &event->hw;
1801
1802 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1803 hwc->config | SNBEP_PMON_CTL_EN);
1804 }
1805
1806 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1807 {
1808 struct pci_dev *pdev = box->pci_dev;
1809 struct hw_perf_event *hwc = &event->hw;
1810
1811 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1812 }
1813
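/*
 * PCI config accessors are at most 32 bits wide, so the 64-bit counter
 * value is assembled from two dword reads of the counter registers.
 */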
1814 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1815 {
1816 struct pci_dev *pdev = box->pci_dev;
1817 struct hw_perf_event *hwc = &event->hw;
1818 u64 count = 0;
1819
1820 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1821 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1822
1823 return count;
1824 }
1825
1826 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1827 .init_box = ivbep_uncore_pci_init_box,
1828 .disable_box = snbep_uncore_pci_disable_box,
1829 .enable_box = snbep_uncore_pci_enable_box,
1830 .disable_event = ivbep_uncore_irp_disable_event,
1831 .enable_event = ivbep_uncore_irp_enable_event,
1832 .read_counter = ivbep_uncore_irp_read_counter,
1833 };
1834
1835 static struct intel_uncore_type ivbep_uncore_irp = {
1836 .name = "irp",
1837 .num_counters = 4,
1838 .num_boxes = 1,
1839 .perf_ctr_bits = 48,
1840 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1841 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1842 .ops = &ivbep_uncore_irp_ops,
1843 .format_group = &ivbep_uncore_format_group,
1844 };
1845
1846 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1847 .init_box = ivbep_uncore_pci_init_box,
1848 .disable_box = snbep_uncore_pci_disable_box,
1849 .enable_box = snbep_uncore_pci_enable_box,
1850 .disable_event = snbep_uncore_pci_disable_event,
1851 .enable_event = snbep_qpi_enable_event,
1852 .read_counter = snbep_uncore_pci_read_counter,
1853 .hw_config = snbep_qpi_hw_config,
1854 .get_constraint = uncore_get_constraint,
1855 .put_constraint = uncore_put_constraint,
1856 };
1857
1858 static struct intel_uncore_type ivbep_uncore_qpi = {
1859 .name = "qpi",
1860 .num_counters = 4,
1861 .num_boxes = 3,
1862 .perf_ctr_bits = 48,
1863 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1864 .event_ctl = SNBEP_PCI_PMON_CTL0,
1865 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1866 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1867 .num_shared_regs = 1,
1868 .ops = &ivbep_uncore_qpi_ops,
1869 .format_group = &ivbep_uncore_qpi_format_group,
1870 };
1871
1872 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1873 .name = "r2pcie",
1874 .num_counters = 4,
1875 .num_boxes = 1,
1876 .perf_ctr_bits = 44,
1877 .constraints = snbep_uncore_r2pcie_constraints,
1878 IVBEP_UNCORE_PCI_COMMON_INIT(),
1879 };
1880
1881 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1882 .name = "r3qpi",
1883 .num_counters = 3,
1884 .num_boxes = 2,
1885 .perf_ctr_bits = 44,
1886 .constraints = snbep_uncore_r3qpi_constraints,
1887 IVBEP_UNCORE_PCI_COMMON_INIT(),
1888 };
1889
1890 enum {
1891 IVBEP_PCI_UNCORE_HA,
1892 IVBEP_PCI_UNCORE_IMC,
1893 IVBEP_PCI_UNCORE_IRP,
1894 IVBEP_PCI_UNCORE_QPI,
1895 IVBEP_PCI_UNCORE_R2PCIE,
1896 IVBEP_PCI_UNCORE_R3QPI,
1897 };
1898
1899 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1900 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1901 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1902 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1903 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1904 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1905 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1906 NULL,
1907 };
1908
1909 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1910 { /* Home Agent 0 */
1911 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1912 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1913 },
1914 { /* Home Agent 1 */
1915 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1916 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1917 },
1918 { /* MC0 Channel 0 */
1919 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1920 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1921 },
1922 { /* MC0 Channel 1 */
1923 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1924 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1925 },
1926 { /* MC0 Channel 3 */
1927 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1928 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1929 },
1930 { /* MC0 Channel 4 */
1931 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1932 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1933 },
1934 { /* MC1 Channel 0 */
1935 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1936 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1937 },
1938 { /* MC1 Channel 1 */
1939 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1940 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1941 },
1942 { /* MC1 Channel 3 */
1943 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1944 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1945 },
1946 { /* MC1 Channel 4 */
1947 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1948 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1949 },
1950 { /* IRP */
1951 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1952 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1953 },
1954 { /* QPI0 Port 0 */
1955 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1956 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1957 },
1958 { /* QPI0 Port 1 */
1959 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1960 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
1961 },
1962 { /* QPI1 Port 2 */
1963 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1964 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
1965 },
1966 { /* R2PCIe */
1967 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1968 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
1969 },
1970 { /* R3QPI0 Link 0 */
1971 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1972 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
1973 },
1974 { /* R3QPI0 Link 1 */
1975 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1976 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
1977 },
1978 { /* R3QPI1 Link 2 */
1979 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1980 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
1981 },
1982 { /* QPI Port 0 filter */
1983 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1984 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1985 SNBEP_PCI_QPI_PORT0_FILTER),
1986 },
1987 { /* QPI Port 1 filter */
1988 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1989 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1990 SNBEP_PCI_QPI_PORT1_FILTER),
1991 },
1992 { /* end: all zeroes */ }
1993 };
1994
1995 static struct pci_driver ivbep_uncore_pci_driver = {
1996 .name = "ivbep_uncore",
1997 .id_table = ivbep_uncore_pci_ids,
1998 };
1999
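/*
 * Device 0x0e1e carries the node ID registers (CPUNODEID/GIDNIDMAP) used to
 * build the PCI bus to physical socket mapping before the uncore PCI driver
 * is registered.
 */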
2000 int ivbep_uncore_pci_init(void)
2001 {
2002 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2003 if (ret)
2004 return ret;
2005 uncore_pci_uncores = ivbep_pci_uncores;
2006 uncore_pci_driver = &ivbep_uncore_pci_driver;
2007 return 0;
2008 }
2009 /* end of IvyTown uncore support */
2010
2011 /* KNL uncore support */
2012 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2013 &format_attr_event.attr,
2014 &format_attr_umask.attr,
2015 &format_attr_edge.attr,
2016 &format_attr_tid_en.attr,
2017 &format_attr_inv.attr,
2018 &format_attr_thresh5.attr,
2019 NULL,
2020 };
2021
2022 static const struct attribute_group knl_uncore_ubox_format_group = {
2023 .name = "format",
2024 .attrs = knl_uncore_ubox_formats_attr,
2025 };
2026
2027 static struct intel_uncore_type knl_uncore_ubox = {
2028 .name = "ubox",
2029 .num_counters = 2,
2030 .num_boxes = 1,
2031 .perf_ctr_bits = 48,
2032 .fixed_ctr_bits = 48,
2033 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2034 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2035 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2036 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2037 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2038 .ops = &snbep_uncore_msr_ops,
2039 .format_group = &knl_uncore_ubox_format_group,
2040 };
2041
2042 static struct attribute *knl_uncore_cha_formats_attr[] = {
2043 &format_attr_event.attr,
2044 &format_attr_umask.attr,
2045 &format_attr_qor.attr,
2046 &format_attr_edge.attr,
2047 &format_attr_tid_en.attr,
2048 &format_attr_inv.attr,
2049 &format_attr_thresh8.attr,
2050 &format_attr_filter_tid4.attr,
2051 &format_attr_filter_link3.attr,
2052 &format_attr_filter_state4.attr,
2053 &format_attr_filter_local.attr,
2054 &format_attr_filter_all_op.attr,
2055 &format_attr_filter_nnm.attr,
2056 &format_attr_filter_opc3.attr,
2057 &format_attr_filter_nc.attr,
2058 &format_attr_filter_isoc.attr,
2059 NULL,
2060 };
2061
2062 static const struct attribute_group knl_uncore_cha_format_group = {
2063 .name = "format",
2064 .attrs = knl_uncore_cha_formats_attr,
2065 };
2066
2067 static struct event_constraint knl_uncore_cha_constraints[] = {
2068 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2069 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2070 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2071 EVENT_CONSTRAINT_END
2072 };
2073
2074 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2075 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2076 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2077 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2078 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2079 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2080 EVENT_EXTRA_END
2081 };
2082
2083 static u64 knl_cha_filter_mask(int fields)
2084 {
2085 u64 mask = 0;
2086
2087 if (fields & 0x1)
2088 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2089 if (fields & 0x2)
2090 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2091 if (fields & 0x4)
2092 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2093 return mask;
2094 }
2095
2096 static struct event_constraint *
2097 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2098 {
2099 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2100 }
2101
2102 static int knl_cha_hw_config(struct intel_uncore_box *box,
2103 struct perf_event *event)
2104 {
2105 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2106 struct extra_reg *er;
2107 int idx = 0;
2108
2109 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2110 if (er->event != (event->hw.config & er->config_mask))
2111 continue;
2112 idx |= er->idx;
2113 }
2114
2115 if (idx) {
2116 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2117 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2118 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2119
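/*
 * The local node, remote node and NNC filter bits are always set so that
 * node-based filtering does not unintentionally suppress counts.
 */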
2120 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2121 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2122 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2123 reg1->idx = idx;
2124 }
2125 return 0;
2126 }
2127
2128 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2129 struct perf_event *event);
2130
2131 static struct intel_uncore_ops knl_uncore_cha_ops = {
2132 .init_box = snbep_uncore_msr_init_box,
2133 .disable_box = snbep_uncore_msr_disable_box,
2134 .enable_box = snbep_uncore_msr_enable_box,
2135 .disable_event = snbep_uncore_msr_disable_event,
2136 .enable_event = hswep_cbox_enable_event,
2137 .read_counter = uncore_msr_read_counter,
2138 .hw_config = knl_cha_hw_config,
2139 .get_constraint = knl_cha_get_constraint,
2140 .put_constraint = snbep_cbox_put_constraint,
2141 };
2142
2143 static struct intel_uncore_type knl_uncore_cha = {
2144 .name = "cha",
2145 .num_counters = 4,
2146 .num_boxes = 38,
2147 .perf_ctr_bits = 48,
2148 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2149 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2150 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2151 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2152 .msr_offset = KNL_CHA_MSR_OFFSET,
2153 .num_shared_regs = 1,
2154 .constraints = knl_uncore_cha_constraints,
2155 .ops = &knl_uncore_cha_ops,
2156 .format_group = &knl_uncore_cha_format_group,
2157 };
2158
2159 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2160 &format_attr_event2.attr,
2161 &format_attr_use_occ_ctr.attr,
2162 &format_attr_occ_sel.attr,
2163 &format_attr_edge.attr,
2164 &format_attr_tid_en.attr,
2165 &format_attr_inv.attr,
2166 &format_attr_thresh6.attr,
2167 &format_attr_occ_invert.attr,
2168 &format_attr_occ_edge_det.attr,
2169 NULL,
2170 };
2171
2172 static const struct attribute_group knl_uncore_pcu_format_group = {
2173 .name = "format",
2174 .attrs = knl_uncore_pcu_formats_attr,
2175 };
2176
2177 static struct intel_uncore_type knl_uncore_pcu = {
2178 .name = "pcu",
2179 .num_counters = 4,
2180 .num_boxes = 1,
2181 .perf_ctr_bits = 48,
2182 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2183 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2184 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2185 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2186 .ops = &snbep_uncore_msr_ops,
2187 .format_group = &knl_uncore_pcu_format_group,
2188 };
2189
2190 static struct intel_uncore_type *knl_msr_uncores[] = {
2191 &knl_uncore_ubox,
2192 &knl_uncore_cha,
2193 &knl_uncore_pcu,
2194 NULL,
2195 };
2196
2197 void knl_uncore_cpu_init(void)
2198 {
2199 uncore_msr_uncores = knl_msr_uncores;
2200 }
2201
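/*
 * Enabling a KNL IMC box amounts to clearing its box control register,
 * which drops the freeze bit along with everything else.
 */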
2202 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2203 {
2204 struct pci_dev *pdev = box->pci_dev;
2205 int box_ctl = uncore_pci_box_ctl(box);
2206
2207 pci_write_config_dword(pdev, box_ctl, 0);
2208 }
2209
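/*
 * The fixed counter has its own enable bit (KNL_PMON_FIXED_CTL_EN);
 * generic counters use the standard SNB-EP enable bit.
 */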
2210 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2211 struct perf_event *event)
2212 {
2213 struct pci_dev *pdev = box->pci_dev;
2214 struct hw_perf_event *hwc = &event->hw;
2215
2216 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2217 == UNCORE_FIXED_EVENT)
2218 pci_write_config_dword(pdev, hwc->config_base,
2219 hwc->config | KNL_PMON_FIXED_CTL_EN);
2220 else
2221 pci_write_config_dword(pdev, hwc->config_base,
2222 hwc->config | SNBEP_PMON_CTL_EN);
2223 }
2224
2225 static struct intel_uncore_ops knl_uncore_imc_ops = {
2226 .init_box = snbep_uncore_pci_init_box,
2227 .disable_box = snbep_uncore_pci_disable_box,
2228 .enable_box = knl_uncore_imc_enable_box,
2229 .read_counter = snbep_uncore_pci_read_counter,
2230 .enable_event = knl_uncore_imc_enable_event,
2231 .disable_event = snbep_uncore_pci_disable_event,
2232 };
2233
2234 static struct intel_uncore_type knl_uncore_imc_uclk = {
2235 .name = "imc_uclk",
2236 .num_counters = 4,
2237 .num_boxes = 2,
2238 .perf_ctr_bits = 48,
2239 .fixed_ctr_bits = 48,
2240 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2241 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2242 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2243 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2244 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2245 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2246 .ops = &knl_uncore_imc_ops,
2247 .format_group = &snbep_uncore_format_group,
2248 };
2249
2250 static struct intel_uncore_type knl_uncore_imc_dclk = {
2251 .name = "imc",
2252 .num_counters = 4,
2253 .num_boxes = 6,
2254 .perf_ctr_bits = 48,
2255 .fixed_ctr_bits = 48,
2256 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2257 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2258 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2259 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2260 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2261 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2262 .ops = &knl_uncore_imc_ops,
2263 .format_group = &snbep_uncore_format_group,
2264 };
2265
2266 static struct intel_uncore_type knl_uncore_edc_uclk = {
2267 .name = "edc_uclk",
2268 .num_counters = 4,
2269 .num_boxes = 8,
2270 .perf_ctr_bits = 48,
2271 .fixed_ctr_bits = 48,
2272 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2273 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2274 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2275 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2276 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2277 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2278 .ops = &knl_uncore_imc_ops,
2279 .format_group = &snbep_uncore_format_group,
2280 };
2281
2282 static struct intel_uncore_type knl_uncore_edc_eclk = {
2283 .name = "edc_eclk",
2284 .num_counters = 4,
2285 .num_boxes = 8,
2286 .perf_ctr_bits = 48,
2287 .fixed_ctr_bits = 48,
2288 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2289 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2290 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2291 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2292 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2293 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2294 .ops = &knl_uncore_imc_ops,
2295 .format_group = &snbep_uncore_format_group,
2296 };
2297
2298 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2299 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2300 EVENT_CONSTRAINT_END
2301 };
2302
2303 static struct intel_uncore_type knl_uncore_m2pcie = {
2304 .name = "m2pcie",
2305 .num_counters = 4,
2306 .num_boxes = 1,
2307 .perf_ctr_bits = 48,
2308 .constraints = knl_uncore_m2pcie_constraints,
2309 SNBEP_UNCORE_PCI_COMMON_INIT(),
2310 };
2311
2312 static struct attribute *knl_uncore_irp_formats_attr[] = {
2313 &format_attr_event.attr,
2314 &format_attr_umask.attr,
2315 &format_attr_qor.attr,
2316 &format_attr_edge.attr,
2317 &format_attr_inv.attr,
2318 &format_attr_thresh8.attr,
2319 NULL,
2320 };
2321
2322 static const struct attribute_group knl_uncore_irp_format_group = {
2323 .name = "format",
2324 .attrs = knl_uncore_irp_formats_attr,
2325 };
2326
2327 static struct intel_uncore_type knl_uncore_irp = {
2328 .name = "irp",
2329 .num_counters = 2,
2330 .num_boxes = 1,
2331 .perf_ctr_bits = 48,
2332 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2333 .event_ctl = SNBEP_PCI_PMON_CTL0,
2334 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2335 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2336 .ops = &snbep_uncore_pci_ops,
2337 .format_group = &knl_uncore_irp_format_group,
2338 };
2339
2340 enum {
2341 KNL_PCI_UNCORE_MC_UCLK,
2342 KNL_PCI_UNCORE_MC_DCLK,
2343 KNL_PCI_UNCORE_EDC_UCLK,
2344 KNL_PCI_UNCORE_EDC_ECLK,
2345 KNL_PCI_UNCORE_M2PCIE,
2346 KNL_PCI_UNCORE_IRP,
2347 };
2348
2349 static struct intel_uncore_type *knl_pci_uncores[] = {
2350 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2351 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2352 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2353 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2354 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2355 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2356 NULL,
2357 };
2358
2359 /*
2360 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2361 * device type. Prior to KNL, each instance of a PMU device type had a unique
2362 * device ID.
2363 *
2364 * PCI Device ID Uncore PMU Devices
2365 * ----------------------------------
2366 * 0x7841 MC0 UClk, MC1 UClk
2367 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2368 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2369 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2370 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2371 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2372 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2373 * 0x7817 M2PCIe
2374 * 0x7814 IRP
2375 */
2376
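/*
 * The first two arguments of UNCORE_PCI_DEV_FULL_DATA() below are the PCI
 * device and function numbers, which tell apart the instances that share a
 * device ID.
 */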
2377 static const struct pci_device_id knl_uncore_pci_ids[] = {
2378 { /* MC0 UClk */
2379 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2380 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2381 },
2382 { /* MC1 UClk */
2383 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2384 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2385 },
2386 { /* MC0 DClk CH 0 */
2387 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2388 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2389 },
2390 { /* MC0 DClk CH 1 */
2391 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2392 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2393 },
2394 { /* MC0 DClk CH 2 */
2395 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2396 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2397 },
2398 { /* MC1 DClk CH 0 */
2399 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2400 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2401 },
2402 { /* MC1 DClk CH 1 */
2403 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2404 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2405 },
2406 { /* MC1 DClk CH 2 */
2407 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2408 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2409 },
2410 { /* EDC0 UClk */
2411 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2412 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2413 },
2414 { /* EDC1 UClk */
2415 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2416 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2417 },
2418 { /* EDC2 UClk */
2419 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2420 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2421 },
2422 { /* EDC3 UClk */
2423 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2424 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2425 },
2426 { /* EDC4 UClk */
2427 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2428 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2429 },
2430 { /* EDC5 UClk */
2431 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2432 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2433 },
2434 { /* EDC6 UClk */
2435 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2436 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2437 },
2438 { /* EDC7 UClk */
2439 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2440 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2441 },
2442 { /* EDC0 EClk */
2443 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2444 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2445 },
2446 { /* EDC1 EClk */
2447 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2448 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2449 },
2450 { /* EDC2 EClk */
2451 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2452 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2453 },
2454 { /* EDC3 EClk */
2455 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2456 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2457 },
2458 { /* EDC4 EClk */
2459 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2460 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2461 },
2462 { /* EDC5 EClk */
2463 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2464 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2465 },
2466 { /* EDC6 EClk */
2467 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2468 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2469 },
2470 { /* EDC7 EClk */
2471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2472 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2473 },
2474 { /* M2PCIe */
2475 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2476 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2477 },
2478 { /* IRP */
2479 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2480 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2481 },
2482 { /* end: all zeroes */ }
2483 };
2484
2485 static struct pci_driver knl_uncore_pci_driver = {
2486 .name = "knl_uncore",
2487 .id_table = knl_uncore_pci_ids,
2488 };
2489
2490 int knl_uncore_pci_init(void)
2491 {
2492 int ret;
2493
2494 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2495 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2496 if (ret)
2497 return ret;
2498 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2499 if (ret)
2500 return ret;
2501 uncore_pci_uncores = knl_pci_uncores;
2502 uncore_pci_driver = &knl_uncore_pci_driver;
2503 return 0;
2504 }
2505
2506 /* end of KNL uncore support */
2507
2508 /* Haswell-EP uncore support */
2509 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2510 &format_attr_event.attr,
2511 &format_attr_umask.attr,
2512 &format_attr_edge.attr,
2513 &format_attr_inv.attr,
2514 &format_attr_thresh5.attr,
2515 &format_attr_filter_tid2.attr,
2516 &format_attr_filter_cid.attr,
2517 NULL,
2518 };
2519
2520 static const struct attribute_group hswep_uncore_ubox_format_group = {
2521 .name = "format",
2522 .attrs = hswep_uncore_ubox_formats_attr,
2523 };
2524
2525 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2526 {
2527 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2528 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2529 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2530 reg1->idx = 0;
2531 return 0;
2532 }
2533
2534 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2535 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2536 .hw_config = hswep_ubox_hw_config,
2537 .get_constraint = uncore_get_constraint,
2538 .put_constraint = uncore_put_constraint,
2539 };
2540
2541 static struct intel_uncore_type hswep_uncore_ubox = {
2542 .name = "ubox",
2543 .num_counters = 2,
2544 .num_boxes = 1,
2545 .perf_ctr_bits = 44,
2546 .fixed_ctr_bits = 48,
2547 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2548 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2549 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2550 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2551 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2552 .num_shared_regs = 1,
2553 .ops = &hswep_uncore_ubox_ops,
2554 .format_group = &hswep_uncore_ubox_format_group,
2555 };
2556
2557 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2558 &format_attr_event.attr,
2559 &format_attr_umask.attr,
2560 &format_attr_edge.attr,
2561 &format_attr_tid_en.attr,
2562 &format_attr_thresh8.attr,
2563 &format_attr_filter_tid3.attr,
2564 &format_attr_filter_link2.attr,
2565 &format_attr_filter_state3.attr,
2566 &format_attr_filter_nid2.attr,
2567 &format_attr_filter_opc2.attr,
2568 &format_attr_filter_nc.attr,
2569 &format_attr_filter_c6.attr,
2570 &format_attr_filter_isoc.attr,
2571 NULL,
2572 };
2573
2574 static const struct attribute_group hswep_uncore_cbox_format_group = {
2575 .name = "format",
2576 .attrs = hswep_uncore_cbox_formats_attr,
2577 };
2578
2579 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2580 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2581 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2582 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2583 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2584 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2585 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2586 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2587 EVENT_CONSTRAINT_END
2588 };
2589
2590 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2591 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2592 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2593 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2594 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2595 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2596 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2597 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2598 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2599 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2600 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2601 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2602 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2603 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2604 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2605 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2606 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2607 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2608 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2609 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2610 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2611 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2612 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2613 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2614 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2615 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2616 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2617 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2618 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2619 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2620 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2621 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2622 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2623 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2624 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2625 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2626 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2627 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2628 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2629 EVENT_EXTRA_END
2630 };
2631
2632 static u64 hswep_cbox_filter_mask(int fields)
2633 {
2634 u64 mask = 0;
2635 if (fields & 0x1)
2636 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2637 if (fields & 0x2)
2638 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2639 if (fields & 0x4)
2640 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2641 if (fields & 0x8)
2642 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2643 if (fields & 0x10) {
2644 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2645 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2646 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2647 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2648 }
2649 return mask;
2650 }
2651
2652 static struct event_constraint *
2653 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2654 {
2655 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2656 }
2657
2658 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2659 {
2660 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2661 struct extra_reg *er;
2662 int idx = 0;
2663
2664 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2665 if (er->event != (event->hw.config & er->config_mask))
2666 continue;
2667 idx |= er->idx;
2668 }
2669
2670 if (idx) {
2671 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2672 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2673 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2674 reg1->idx = idx;
2675 }
2676 return 0;
2677 }
2678
2679 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2680 struct perf_event *event)
2681 {
2682 struct hw_perf_event *hwc = &event->hw;
2683 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2684
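/*
 * Unlike IVB-EP, the two HSW-EP Cbox filter MSRs are adjacent, so the high
 * dword of the filter goes to reg + 1.
 */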
2685 if (reg1->idx != EXTRA_REG_NONE) {
2686 u64 filter = uncore_shared_reg_config(box, 0);
2687 wrmsrl(reg1->reg, filter & 0xffffffff);
2688 wrmsrl(reg1->reg + 1, filter >> 32);
2689 }
2690
2691 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2692 }
2693
2694 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2695 .init_box = snbep_uncore_msr_init_box,
2696 .disable_box = snbep_uncore_msr_disable_box,
2697 .enable_box = snbep_uncore_msr_enable_box,
2698 .disable_event = snbep_uncore_msr_disable_event,
2699 .enable_event = hswep_cbox_enable_event,
2700 .read_counter = uncore_msr_read_counter,
2701 .hw_config = hswep_cbox_hw_config,
2702 .get_constraint = hswep_cbox_get_constraint,
2703 .put_constraint = snbep_cbox_put_constraint,
2704 };
2705
2706 static struct intel_uncore_type hswep_uncore_cbox = {
2707 .name = "cbox",
2708 .num_counters = 4,
2709 .num_boxes = 18,
2710 .perf_ctr_bits = 48,
2711 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2712 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2713 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2714 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2715 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2716 .num_shared_regs = 1,
2717 .constraints = hswep_uncore_cbox_constraints,
2718 .ops = &hswep_uncore_cbox_ops,
2719 .format_group = &hswep_uncore_cbox_format_group,
2720 };
2721
2722 /*
2723 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2724 */
2725 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2726 {
2727 unsigned msr = uncore_msr_box_ctl(box);
2728
2729 if (msr) {
2730 u64 init = SNBEP_PMON_BOX_CTL_INT;
2731 u64 flags = 0;
2732 int i;
2733
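/*
 * Accumulate the init bits one at a time, rewriting the whole value on
 * each step, so a single write never flips more than one new bit.
 */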
2734 for_each_set_bit(i, (unsigned long *)&init, 64) {
2735 flags |= (1ULL << i);
2736 wrmsrl(msr, flags);
2737 }
2738 }
2739 }
2740
2741 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2742 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2743 .init_box = hswep_uncore_sbox_msr_init_box
2744 };
2745
2746 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2747 &format_attr_event.attr,
2748 &format_attr_umask.attr,
2749 &format_attr_edge.attr,
2750 &format_attr_tid_en.attr,
2751 &format_attr_inv.attr,
2752 &format_attr_thresh8.attr,
2753 NULL,
2754 };
2755
2756 static const struct attribute_group hswep_uncore_sbox_format_group = {
2757 .name = "format",
2758 .attrs = hswep_uncore_sbox_formats_attr,
2759 };
2760
2761 static struct intel_uncore_type hswep_uncore_sbox = {
2762 .name = "sbox",
2763 .num_counters = 4,
2764 .num_boxes = 4,
2765 .perf_ctr_bits = 44,
2766 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2767 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2768 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2769 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2770 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2771 .ops = &hswep_uncore_sbox_msr_ops,
2772 .format_group = &hswep_uncore_sbox_format_group,
2773 };
2774
2775 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2776 {
2777 struct hw_perf_event *hwc = &event->hw;
2778 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2779 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2780
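/*
 * Events 0xb-0xe use the shared PCU filter register; the band threshold is
 * taken from config1 (the filter_band* format fields).
 */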
2781 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2782 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2783 reg1->idx = ev_sel - 0xb;
2784 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2785 }
2786 return 0;
2787 }
2788
2789 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2790 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2791 .hw_config = hswep_pcu_hw_config,
2792 .get_constraint = snbep_pcu_get_constraint,
2793 .put_constraint = snbep_pcu_put_constraint,
2794 };
2795
2796 static struct intel_uncore_type hswep_uncore_pcu = {
2797 .name = "pcu",
2798 .num_counters = 4,
2799 .num_boxes = 1,
2800 .perf_ctr_bits = 48,
2801 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2802 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2803 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2804 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2805 .num_shared_regs = 1,
2806 .ops = &hswep_uncore_pcu_ops,
2807 .format_group = &snbep_uncore_pcu_format_group,
2808 };
2809
2810 static struct intel_uncore_type *hswep_msr_uncores[] = {
2811 &hswep_uncore_ubox,
2812 &hswep_uncore_cbox,
2813 &hswep_uncore_sbox,
2814 &hswep_uncore_pcu,
2815 NULL,
2816 };
2817
2818 #define HSWEP_PCU_DID 0x2fc0
2819 #define HSWEP_PCU_CAPID4_OFFET 0x94
2820 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
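
/*
 * A 'chop' value of 0 in the PCU CAPID4 register identifies reduced-die
 * parts that have fewer (or no) SBOXes.
 */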
2821
2822 static bool hswep_has_limit_sbox(unsigned int device)
2823 {
2824 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2825 u32 capid4;
2826
2827 if (!dev)
2828 return false;
2829
2830 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2831 pci_dev_put(dev);
2832 if (!hswep_get_chop(capid4))
2833 return true;
2834
2835 return false;
2836 }
2837
2838 void hswep_uncore_cpu_init(void)
2839 {
2840 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2841 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2842
2843 /* Detect 6-8 core systems with only two SBOXes */
2844 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2845 hswep_uncore_sbox.num_boxes = 2;
2846
2847 uncore_msr_uncores = hswep_msr_uncores;
2848 }
2849
2850 static struct intel_uncore_type hswep_uncore_ha = {
2851 .name = "ha",
2852 .num_counters = 4,
2853 .num_boxes = 2,
2854 .perf_ctr_bits = 48,
2855 SNBEP_UNCORE_PCI_COMMON_INIT(),
2856 };
2857
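/*
 * Each CAS operation transfers one 64-byte cache line; the scale
 * 6.103515625e-5 is 64 / 2^20, converting counts to MiB.
 */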
2858 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2859 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2860 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2861 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2862 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2863 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2864 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2865 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2866 { /* end: all zeroes */ },
2867 };
2868
2869 static struct intel_uncore_type hswep_uncore_imc = {
2870 .name = "imc",
2871 .num_counters = 4,
2872 .num_boxes = 8,
2873 .perf_ctr_bits = 48,
2874 .fixed_ctr_bits = 48,
2875 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2876 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2877 .event_descs = hswep_uncore_imc_events,
2878 SNBEP_UNCORE_PCI_COMMON_INIT(),
2879 };
2880
2881 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2882
2883 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2884 {
2885 struct pci_dev *pdev = box->pci_dev;
2886 struct hw_perf_event *hwc = &event->hw;
2887 u64 count = 0;
2888
2889 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2890 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2891
2892 return count;
2893 }
2894
2895 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2896 .init_box = snbep_uncore_pci_init_box,
2897 .disable_box = snbep_uncore_pci_disable_box,
2898 .enable_box = snbep_uncore_pci_enable_box,
2899 .disable_event = ivbep_uncore_irp_disable_event,
2900 .enable_event = ivbep_uncore_irp_enable_event,
2901 .read_counter = hswep_uncore_irp_read_counter,
2902 };
2903
2904 static struct intel_uncore_type hswep_uncore_irp = {
2905 .name = "irp",
2906 .num_counters = 4,
2907 .num_boxes = 1,
2908 .perf_ctr_bits = 48,
2909 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2910 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2911 .ops = &hswep_uncore_irp_ops,
2912 .format_group = &snbep_uncore_format_group,
2913 };
2914
2915 static struct intel_uncore_type hswep_uncore_qpi = {
2916 .name = "qpi",
2917 .num_counters = 4,
2918 .num_boxes = 3,
2919 .perf_ctr_bits = 48,
2920 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2921 .event_ctl = SNBEP_PCI_PMON_CTL0,
2922 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2923 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2924 .num_shared_regs = 1,
2925 .ops = &snbep_uncore_qpi_ops,
2926 .format_group = &snbep_uncore_qpi_format_group,
2927 };
2928
2929 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2930 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2931 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2932 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2933 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2934 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2935 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2936 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2937 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2938 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2939 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2940 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2941 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2942 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2943 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2944 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2945 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2946 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2947 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2948 EVENT_CONSTRAINT_END
2949 };
2950
2951 static struct intel_uncore_type hswep_uncore_r2pcie = {
2952 .name = "r2pcie",
2953 .num_counters = 4,
2954 .num_boxes = 1,
2955 .perf_ctr_bits = 48,
2956 .constraints = hswep_uncore_r2pcie_constraints,
2957 SNBEP_UNCORE_PCI_COMMON_INIT(),
2958 };
2959
2960 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2961 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2962 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2963 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2964 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2965 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2966 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2967 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2968 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2969 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2970 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2971 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2972 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2973 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2974 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2975 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2976 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2977 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2978 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2979 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2980 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2981 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2982 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2983 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2984 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2985 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2986 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2987 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2988 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2989 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2990 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2991 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2992 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2993 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2994 EVENT_CONSTRAINT_END
2995 };
2996
2997 static struct intel_uncore_type hswep_uncore_r3qpi = {
2998 .name = "r3qpi",
2999 .num_counters = 3,
3000 .num_boxes = 3,
3001 .perf_ctr_bits = 44,
3002 .constraints = hswep_uncore_r3qpi_constraints,
3003 SNBEP_UNCORE_PCI_COMMON_INIT(),
3004 };
3005
3006 enum {
3007 HSWEP_PCI_UNCORE_HA,
3008 HSWEP_PCI_UNCORE_IMC,
3009 HSWEP_PCI_UNCORE_IRP,
3010 HSWEP_PCI_UNCORE_QPI,
3011 HSWEP_PCI_UNCORE_R2PCIE,
3012 HSWEP_PCI_UNCORE_R3QPI,
3013 };
3014
3015 static struct intel_uncore_type *hswep_pci_uncores[] = {
3016 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
3017 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
3018 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
3019 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
3020 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
3021 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
3022 NULL,
3023 };
3024
3025 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3026 { /* Home Agent 0 */
3027 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3028 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3029 },
3030 { /* Home Agent 1 */
3031 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3032 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3033 },
3034 { /* MC0 Channel 0 */
3035 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3036 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3037 },
3038 { /* MC0 Channel 1 */
3039 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3040 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3041 },
3042 { /* MC0 Channel 2 */
3043 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3044 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3045 },
3046 { /* MC0 Channel 3 */
3047 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3048 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3049 },
3050 { /* MC1 Channel 0 */
3051 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3052 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3053 },
3054 { /* MC1 Channel 1 */
3055 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3056 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3057 },
3058 { /* MC1 Channel 2 */
3059 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3060 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3061 },
3062 { /* MC1 Channel 3 */
3063 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3064 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3065 },
3066 { /* IRP */
3067 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3068 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3069 },
3070 { /* QPI0 Port 0 */
3071 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3072 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3073 },
3074 { /* QPI0 Port 1 */
3075 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3076 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3077 },
3078 { /* QPI1 Port 2 */
3079 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3080 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3081 },
3082 { /* R2PCIe */
3083 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3084 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3085 },
3086 { /* R3QPI0 Link 0 */
3087 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3088 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3089 },
3090 { /* R3QPI0 Link 1 */
3091 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3092 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3093 },
3094 { /* R3QPI1 Link 2 */
3095 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3096 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3097 },
3098 { /* QPI Port 0 filter */
3099 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3100 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3101 SNBEP_PCI_QPI_PORT0_FILTER),
3102 },
3103 { /* QPI Port 1 filter */
3104 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3105 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3106 SNBEP_PCI_QPI_PORT1_FILTER),
3107 },
3108 { /* end: all zeroes */ }
3109 };
3110
3111 static struct pci_driver hswep_uncore_pci_driver = {
3112 .name = "hswep_uncore",
3113 .id_table = hswep_uncore_pci_ids,
3114 };
3115
3116 int hswep_uncore_pci_init(void)
3117 {
3118 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3119 if (ret)
3120 return ret;
3121 uncore_pci_uncores = hswep_pci_uncores;
3122 uncore_pci_driver = &hswep_uncore_pci_driver;
3123 return 0;
3124 }
3125 /* end of Haswell-EP uncore support */
3126
3127 /* BDX uncore support */
3128
3129 static struct intel_uncore_type bdx_uncore_ubox = {
3130 .name = "ubox",
3131 .num_counters = 2,
3132 .num_boxes = 1,
3133 .perf_ctr_bits = 48,
3134 .fixed_ctr_bits = 48,
3135 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3136 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3137 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3138 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3139 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3140 .num_shared_regs = 1,
3141 .ops = &ivbep_uncore_msr_ops,
3142 .format_group = &ivbep_uncore_ubox_format_group,
3143 };
3144
3145 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3146 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3147 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3148 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3149 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3150 EVENT_CONSTRAINT_END
3151 };
3152
3153 static struct intel_uncore_type bdx_uncore_cbox = {
3154 .name = "cbox",
3155 .num_counters = 4,
3156 .num_boxes = 24,
3157 .perf_ctr_bits = 48,
3158 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3159 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3160 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3161 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3162 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3163 .num_shared_regs = 1,
3164 .constraints = bdx_uncore_cbox_constraints,
3165 .ops = &hswep_uncore_cbox_ops,
3166 .format_group = &hswep_uncore_cbox_format_group,
3167 };
3168
3169 static struct intel_uncore_type bdx_uncore_sbox = {
3170 .name = "sbox",
3171 .num_counters = 4,
3172 .num_boxes = 4,
3173 .perf_ctr_bits = 48,
3174 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3175 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3176 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3177 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3178 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3179 .ops = &hswep_uncore_sbox_msr_ops,
3180 .format_group = &hswep_uncore_sbox_format_group,
3181 };
3182
3183 #define BDX_MSR_UNCORE_SBOX 3
3184
3185 static struct intel_uncore_type *bdx_msr_uncores[] = {
3186 &bdx_uncore_ubox,
3187 &bdx_uncore_cbox,
3188 &hswep_uncore_pcu,
3189 &bdx_uncore_sbox,
3190 NULL,
3191 };
3192
3193 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3194 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3195 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3196 EVENT_CONSTRAINT_END
3197 };
3198
3199 #define BDX_PCU_DID 0x6fc0
3200
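/*
 * A sketch of the init flow below: cap the CBox count at the number of
 * cores actually present, publish the MSR uncore list, then null out the
 * SBOX entry on parts without SBOXes (model 86 is Broadwell-DE; other
 * limited SKUs are detected via the PCU device ID above).
 */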
3201 void bdx_uncore_cpu_init(void)
3202 {
3203 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3204 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3205 uncore_msr_uncores = bdx_msr_uncores;
3206
3207 /* Detect systems with no SBOXes */
3208 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3209 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3210
3211 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3212 }
3213
3214 static struct intel_uncore_type bdx_uncore_ha = {
3215 .name = "ha",
3216 .num_counters = 4,
3217 .num_boxes = 2,
3218 .perf_ctr_bits = 48,
3219 SNBEP_UNCORE_PCI_COMMON_INIT(),
3220 };
3221
3222 static struct intel_uncore_type bdx_uncore_imc = {
3223 .name = "imc",
3224 .num_counters = 4,
3225 .num_boxes = 8,
3226 .perf_ctr_bits = 48,
3227 .fixed_ctr_bits = 48,
3228 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3229 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3230 .event_descs = hswep_uncore_imc_events,
3231 SNBEP_UNCORE_PCI_COMMON_INIT(),
3232 };
3233
3234 static struct intel_uncore_type bdx_uncore_irp = {
3235 .name = "irp",
3236 .num_counters = 4,
3237 .num_boxes = 1,
3238 .perf_ctr_bits = 48,
3239 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3240 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3241 .ops = &hswep_uncore_irp_ops,
3242 .format_group = &snbep_uncore_format_group,
3243 };
3244
3245 static struct intel_uncore_type bdx_uncore_qpi = {
3246 .name = "qpi",
3247 .num_counters = 4,
3248 .num_boxes = 3,
3249 .perf_ctr_bits = 48,
3250 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3251 .event_ctl = SNBEP_PCI_PMON_CTL0,
3252 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3253 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3254 .num_shared_regs = 1,
3255 .ops = &snbep_uncore_qpi_ops,
3256 .format_group = &snbep_uncore_qpi_format_group,
3257 };
3258
3259 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3260 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3261 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3262 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3263 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3264 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3265 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3266 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3267 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3268 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3269 EVENT_CONSTRAINT_END
3270 };
3271
3272 static struct intel_uncore_type bdx_uncore_r2pcie = {
3273 .name = "r2pcie",
3274 .num_counters = 4,
3275 .num_boxes = 1,
3276 .perf_ctr_bits = 48,
3277 .constraints = bdx_uncore_r2pcie_constraints,
3278 SNBEP_UNCORE_PCI_COMMON_INIT(),
3279 };
3280
3281 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3282 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3283 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3284 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3285 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3286 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3287 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3288 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3289 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3290 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3291 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3292 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3293 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3294 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3295 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3296 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3297 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3298 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3299 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3300 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3301 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3302 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3303 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3304 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3305 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3306 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3307 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3308 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3309 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3310 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3311 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3312 EVENT_CONSTRAINT_END
3313 };
3314
3315 static struct intel_uncore_type bdx_uncore_r3qpi = {
3316 .name = "r3qpi",
3317 .num_counters = 3,
3318 .num_boxes = 3,
3319 .perf_ctr_bits = 48,
3320 .constraints = bdx_uncore_r3qpi_constraints,
3321 SNBEP_UNCORE_PCI_COMMON_INIT(),
3322 };
3323
3324 enum {
3325 BDX_PCI_UNCORE_HA,
3326 BDX_PCI_UNCORE_IMC,
3327 BDX_PCI_UNCORE_IRP,
3328 BDX_PCI_UNCORE_QPI,
3329 BDX_PCI_UNCORE_R2PCIE,
3330 BDX_PCI_UNCORE_R3QPI,
3331 };
3332
3333 static struct intel_uncore_type *bdx_pci_uncores[] = {
3334 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3335 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3336 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3337 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3338 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3339 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3340 NULL,
3341 };
3342
3343 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3344 { /* Home Agent 0 */
3345 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3346 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3347 },
3348 { /* Home Agent 1 */
3349 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3350 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3351 },
3352 { /* MC0 Channel 0 */
3353 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3354 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3355 },
3356 { /* MC0 Channel 1 */
3357 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3358 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3359 },
3360 { /* MC0 Channel 2 */
3361 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3362 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3363 },
3364 { /* MC0 Channel 3 */
3365 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3366 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3367 },
3368 { /* MC1 Channel 0 */
3369 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3370 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3371 },
3372 { /* MC1 Channel 1 */
3373 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3374 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3375 },
3376 { /* MC1 Channel 2 */
3377 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3378 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3379 },
3380 { /* MC1 Channel 3 */
3381 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3382 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3383 },
3384 { /* IRP */
3385 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3386 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3387 },
3388 { /* QPI0 Port 0 */
3389 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3390 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3391 },
3392 { /* QPI0 Port 1 */
3393 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3394 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3395 },
3396 { /* QPI1 Port 2 */
3397 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3398 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3399 },
3400 { /* R2PCIe */
3401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3402 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3403 },
3404 { /* R3QPI0 Link 0 */
3405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3406 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3407 },
3408 { /* R3QPI0 Link 1 */
3409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3410 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3411 },
3412 { /* R3QPI1 Link 2 */
3413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3414 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3415 },
3416 { /* QPI Port 0 filter */
3417 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3418 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3419 SNBEP_PCI_QPI_PORT0_FILTER),
3420 },
3421 { /* QPI Port 1 filter */
3422 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3423 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3424 SNBEP_PCI_QPI_PORT1_FILTER),
3425 },
3426 { /* QPI Port 2 filter */
3427 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3428 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3429 BDX_PCI_QPI_PORT2_FILTER),
3430 },
3431 { /* end: all zeroes */ }
3432 };
3433
3434 static struct pci_driver bdx_uncore_pci_driver = {
3435 .name = "bdx_uncore",
3436 .id_table = bdx_uncore_pci_ids,
3437 };
3438
3439 int bdx_uncore_pci_init(void)
3440 {
3441 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3442
3443 if (ret)
3444 return ret;
3445 uncore_pci_uncores = bdx_pci_uncores;
3446 uncore_pci_driver = &bdx_uncore_pci_driver;
3447 return 0;
3448 }
3449
3450 /* end of BDX uncore support */
3451
3452 /* SKX uncore support */
3453
3454 static struct intel_uncore_type skx_uncore_ubox = {
3455 .name = "ubox",
3456 .num_counters = 2,
3457 .num_boxes = 1,
3458 .perf_ctr_bits = 48,
3459 .fixed_ctr_bits = 48,
3460 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3461 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3462 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3463 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3464 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3465 .ops = &ivbep_uncore_msr_ops,
3466 .format_group = &ivbep_uncore_ubox_format_group,
3467 };
3468
3469 static struct attribute *skx_uncore_cha_formats_attr[] = {
3470 &format_attr_event.attr,
3471 &format_attr_umask.attr,
3472 &format_attr_edge.attr,
3473 &format_attr_tid_en.attr,
3474 &format_attr_inv.attr,
3475 &format_attr_thresh8.attr,
3476 &format_attr_filter_tid4.attr,
3477 &format_attr_filter_state5.attr,
3478 &format_attr_filter_rem.attr,
3479 &format_attr_filter_loc.attr,
3480 &format_attr_filter_nm.attr,
3481 &format_attr_filter_all_op.attr,
3482 &format_attr_filter_not_nm.attr,
3483 &format_attr_filter_opc_0.attr,
3484 &format_attr_filter_opc_1.attr,
3485 &format_attr_filter_nc.attr,
3486 &format_attr_filter_isoc.attr,
3487 NULL,
3488 };
3489
3490 static const struct attribute_group skx_uncore_chabox_format_group = {
3491 .name = "format",
3492 .attrs = skx_uncore_cha_formats_attr,
3493 };
3494
3495 static struct event_constraint skx_uncore_chabox_constraints[] = {
3496 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3497 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3498 EVENT_CONSTRAINT_END
3499 };
3500
3501 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3502 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3503 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3504 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3505 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3506 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3507 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3508 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3509 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3510 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3511 EVENT_EXTRA_END
3512 };
3513
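/*
 * Translate the extra-reg "fields" bitmap into the set of writable CHA
 * filter bits: bit 0 selects TID, bit 1 LINK, bit 2 STATE, and bit 3
 * covers the remaining opcode/locality filter bits as one group.
 */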
3514 static u64 skx_cha_filter_mask(int fields)
3515 {
3516 u64 mask = 0;
3517
3518 if (fields & 0x1)
3519 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3520 if (fields & 0x2)
3521 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3522 if (fields & 0x4)
3523 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3524 if (fields & 0x8) {
3525 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3526 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3527 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3528 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3529 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3530 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3531 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3532 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3533 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3534 }
3535 return mask;
3536 }
3537
3538 static struct event_constraint *
3539 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3540 {
3541 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3542 }
3543
3544 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3545 {
3546 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3547 struct extra_reg *er;
3548 int idx = 0;
3549 /* Any of the CHA events may be filtered by Thread/Core-ID. */
3550 if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3551 idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3552
3553 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3554 if (er->event != (event->hw.config & er->config_mask))
3555 continue;
3556 idx |= er->idx;
3557 }
3558
3559 if (idx) {
3560 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3561 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3562 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3563 reg1->idx = idx;
3564 }
3565 return 0;
3566 }
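/*
 * Illustrative use from userspace (PMU name and values are examples
 * only): a CHA event with a filter supplied through config1, e.g.
 *   perf stat -e 'uncore_cha_0/event=0x34,umask=0x03,config1=0x3f/' -a sleep 1
 * hw_config() above picks the per-box filter register and masks config1
 * down to the bits the event is actually allowed to set.
 */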
3567
3568 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3569 /* There is no frz_en for chabox ctl */
3570 .init_box = ivbep_uncore_msr_init_box,
3571 .disable_box = snbep_uncore_msr_disable_box,
3572 .enable_box = snbep_uncore_msr_enable_box,
3573 .disable_event = snbep_uncore_msr_disable_event,
3574 .enable_event = hswep_cbox_enable_event,
3575 .read_counter = uncore_msr_read_counter,
3576 .hw_config = skx_cha_hw_config,
3577 .get_constraint = skx_cha_get_constraint,
3578 .put_constraint = snbep_cbox_put_constraint,
3579 };
3580
3581 static struct intel_uncore_type skx_uncore_chabox = {
3582 .name = "cha",
3583 .num_counters = 4,
3584 .perf_ctr_bits = 48,
3585 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3586 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3587 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3588 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3589 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3590 .num_shared_regs = 1,
3591 .constraints = skx_uncore_chabox_constraints,
3592 .ops = &skx_uncore_chabox_ops,
3593 .format_group = &skx_uncore_chabox_format_group,
3594 };
3595
3596 static struct attribute *skx_uncore_iio_formats_attr[] = {
3597 &format_attr_event.attr,
3598 &format_attr_umask.attr,
3599 &format_attr_edge.attr,
3600 &format_attr_inv.attr,
3601 &format_attr_thresh9.attr,
3602 &format_attr_ch_mask.attr,
3603 &format_attr_fc_mask.attr,
3604 NULL,
3605 };
3606
3607 static const struct attribute_group skx_uncore_iio_format_group = {
3608 .name = "format",
3609 .attrs = skx_uncore_iio_formats_attr,
3610 };
3611
3612 static struct event_constraint skx_uncore_iio_constraints[] = {
3613 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3614 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3615 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3616 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3617 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3618 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3619 UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3620 EVENT_CONSTRAINT_END
3621 };
3622
3623 static void skx_iio_enable_event(struct intel_uncore_box *box,
3624 struct perf_event *event)
3625 {
3626 struct hw_perf_event *hwc = &event->hw;
3627
3628 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3629 }
3630
3631 static struct intel_uncore_ops skx_uncore_iio_ops = {
3632 .init_box = ivbep_uncore_msr_init_box,
3633 .disable_box = snbep_uncore_msr_disable_box,
3634 .enable_box = snbep_uncore_msr_enable_box,
3635 .disable_event = snbep_uncore_msr_disable_event,
3636 .enable_event = skx_iio_enable_event,
3637 .read_counter = uncore_msr_read_counter,
3638 };
3639
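/*
 * topology[die] caches the raw SKX_MSR_CPU_BUS_NUMBER value; each IIO
 * stack's root bus number appears to occupy one byte of it, so the stack
 * for this pmu_idx is extracted with a byte-stride shift.
 */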
3640 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3641 {
3642 return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
3643 }
3644
3645 static umode_t
3646 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3647 int die, int zero_bus_pmu)
3648 {
3649 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3650
3651 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3652 }
3653
3654 static umode_t
3655 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3656 {
3657 /* Root bus 0x00 is valid only for pmu_idx = 0. */
3658 return pmu_iio_mapping_visible(kobj, attr, die, 0);
3659 }
3660
3661 static ssize_t skx_iio_mapping_show(struct device *dev,
3662 struct device_attribute *attr, char *buf)
3663 {
3664 struct pci_bus *bus = pci_find_next_bus(NULL);
3665 struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
3666 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3667 long die = (long)ea->var;
3668
3669 /*
3670 * The current implementation supports only a single-segment configuration,
3671 * so it is safe to take the segment value from the first available root bus.
3672 */
3673 return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
3674 skx_iio_stack(uncore_pmu, die));
3675 }
3676
3677 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3678 {
3679 u64 msr_value;
3680
3681 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3682 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3683 return -ENXIO;
3684
3685 *topology = msr_value;
3686
3687 return 0;
3688 }
3689
3690 static int die_to_cpu(int die)
3691 {
3692 int res = 0, cpu, current_die;
3693 /*
3694 * Take cpus_read_lock() to ensure no CPU goes offline while we walk
3695 * cpu_online_mask.
3696 */
3697 cpus_read_lock();
3698 for_each_online_cpu(cpu) {
3699 current_die = topology_logical_die_id(cpu);
3700 if (current_die == die) {
3701 res = cpu;
3702 break;
3703 }
3704 }
3705 cpus_read_unlock();
3706 return res;
3707 }
3708
3709 static int skx_iio_get_topology(struct intel_uncore_type *type)
3710 {
3711 int i, ret;
3712 struct pci_bus *bus = NULL;
3713
3714 /*
3715 * Only single-segment environments have been verified; multi-segment
3716 * topologies are rejected for now, except for VMD domains.
3717 * VMD domains start at 0x10000 so they do not clash with ACPI _SEG domains.
3718 */
3719 while ((bus = pci_find_next_bus(bus))
3720 && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
3721 ;
3722 if (bus)
3723 return -EPERM;
3724
3725 type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
3726 if (!type->topology)
3727 return -ENOMEM;
3728
3729 for (i = 0; i < uncore_max_dies(); i++) {
3730 ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
3731 if (ret) {
3732 kfree(type->topology);
3733 type->topology = NULL;
3734 return ret;
3735 }
3736 }
3737
3738 return 0;
3739 }
3740
3741 static struct attribute_group skx_iio_mapping_group = {
3742 .is_visible = skx_iio_mapping_visible,
3743 };
3744
3745 static const struct attribute_group *skx_iio_attr_update[] = {
3746 &skx_iio_mapping_group,
3747 NULL,
3748 };
3749
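/*
 * Drop "ag" from the NULL-terminated attr_update array by shifting every
 * later entry down one slot; used on the error path when the mapping
 * attributes cannot be created.
 */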
3750 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3751 struct attribute_group *ag)
3752 {
3753 int i;
3754
3755 for (i = 0; groups[i]; i++) {
3756 if (groups[i] == ag) {
3757 for (i++; groups[i]; i++)
3758 groups[i - 1] = groups[i];
3759 groups[i - 1] = NULL;
3760 break;
3761 }
3762 }
3763 }
3764
3765 static int
3766 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3767 {
3768 char buf[64];
3769 int ret;
3770 long die = -1;
3771 struct attribute **attrs = NULL;
3772 struct dev_ext_attribute *eas = NULL;
3773
3774 ret = type->get_topology(type);
3775 if (ret < 0)
3776 goto clear_attr_update;
3777
3778 ret = -ENOMEM;
3779
3780 /* One more for NULL. */
3781 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3782 if (!attrs)
3783 goto clear_topology;
3784
3785 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3786 if (!eas)
3787 goto clear_attrs;
3788
3789 for (die = 0; die < uncore_max_dies(); die++) {
3790 sprintf(buf, "die%ld", die);
3791 sysfs_attr_init(&eas[die].attr.attr);
3792 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3793 if (!eas[die].attr.attr.name)
3794 goto err;
3795 eas[die].attr.attr.mode = 0444;
3796 eas[die].attr.show = skx_iio_mapping_show;
3797 eas[die].attr.store = NULL;
3798 eas[die].var = (void *)die;
3799 attrs[die] = &eas[die].attr.attr;
3800 }
3801 ag->attrs = attrs;
3802
3803 return 0;
3804 err:
3805 for (; die >= 0; die--)
3806 kfree(eas[die].attr.attr.name);
3807 kfree(eas);
3808 clear_attrs:
3809 kfree(attrs);
3810 clear_topology:
3811 kfree(type->topology);
3812 clear_attr_update:
3813 pmu_clear_mapping_attr(type->attr_update, ag);
3814 return ret;
3815 }
3816
3817 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3818 {
3819 return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
3820 }
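/*
 * The attributes built above surface the stack-to-bus mapping in sysfs.
 * Illustrative read (path and value are examples only):
 *   $ cat /sys/devices/uncore_iio_0/die0
 *   0000:00
 */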
3821
3822 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3823 {
3824 struct attribute **attr = skx_iio_mapping_group.attrs;
3825
3826 if (!attr)
3827 return;
3828
3829 for (; *attr; attr++)
3830 kfree((*attr)->name);
3831 kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3832 kfree(skx_iio_mapping_group.attrs);
3833 skx_iio_mapping_group.attrs = NULL;
3834 kfree(type->topology);
3835 }
3836
3837 static struct intel_uncore_type skx_uncore_iio = {
3838 .name = "iio",
3839 .num_counters = 4,
3840 .num_boxes = 6,
3841 .perf_ctr_bits = 48,
3842 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3843 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
3844 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
3845 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3846 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
3847 .msr_offset = SKX_IIO_MSR_OFFSET,
3848 .constraints = skx_uncore_iio_constraints,
3849 .ops = &skx_uncore_iio_ops,
3850 .format_group = &skx_uncore_iio_format_group,
3851 .attr_update = skx_iio_attr_update,
3852 .get_topology = skx_iio_get_topology,
3853 .set_mapping = skx_iio_set_mapping,
3854 .cleanup_mapping = skx_iio_cleanup_mapping,
3855 };
3856
3857 enum perf_uncore_iio_freerunning_type_id {
3858 SKX_IIO_MSR_IOCLK = 0,
3859 SKX_IIO_MSR_BW = 1,
3860 SKX_IIO_MSR_UTIL = 2,
3861
3862 SKX_IIO_FREERUNNING_TYPE_MAX,
3863 };
3864
3865
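/*
 * Initializer order below is assumed to follow struct freerunning_counters
 * in uncore.h: counter base MSR, offset between counters, offset between
 * boxes, number of counters, and counter width in bits.
 */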
3866 static struct freerunning_counters skx_iio_freerunning[] = {
3867 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3868 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3869 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
3870 };
3871
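/*
 * Note on the .scale entries below: 3.814697266e-6 is 4/2^20, which
 * suggests the bandwidth counters tick once per 4 bytes transferred and
 * the scale converts raw counts straight to MiB.
 */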
3872 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3873 /* Free-Running IO CLOCKS Counter */
3874 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
3875 /* Free-Running IIO BANDWIDTH Counters */
3876 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
3877 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
3878 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
3879 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
3880 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
3881 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
3882 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
3883 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
3884 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
3885 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
3886 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
3887 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
3888 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
3889 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
3890 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
3891 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
3892 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
3893 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
3894 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
3895 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
3896 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
3897 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
3898 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
3899 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
3900 /* Free-running IIO UTILIZATION Counters */
3901 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
3902 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
3903 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
3904 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
3905 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
3906 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
3907 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
3908 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
3909 { /* end: all zeroes */ },
3910 };
3911
3912 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3913 .read_counter = uncore_msr_read_counter,
3914 .hw_config = uncore_freerunning_hw_config,
3915 };
3916
3917 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3918 &format_attr_event.attr,
3919 &format_attr_umask.attr,
3920 NULL,
3921 };
3922
3923 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3924 .name = "format",
3925 .attrs = skx_uncore_iio_freerunning_formats_attr,
3926 };
3927
3928 static struct intel_uncore_type skx_uncore_iio_free_running = {
3929 .name = "iio_free_running",
3930 .num_counters = 17,
3931 .num_boxes = 6,
3932 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
3933 .freerunning = skx_iio_freerunning,
3934 .ops = &skx_uncore_iio_freerunning_ops,
3935 .event_descs = skx_uncore_iio_freerunning_events,
3936 .format_group = &skx_uncore_iio_freerunning_format_group,
3937 };
3938
3939 static struct attribute *skx_uncore_formats_attr[] = {
3940 &format_attr_event.attr,
3941 &format_attr_umask.attr,
3942 &format_attr_edge.attr,
3943 &format_attr_inv.attr,
3944 &format_attr_thresh8.attr,
3945 NULL,
3946 };
3947
3948 static const struct attribute_group skx_uncore_format_group = {
3949 .name = "format",
3950 .attrs = skx_uncore_formats_attr,
3951 };
3952
3953 static struct intel_uncore_type skx_uncore_irp = {
3954 .name = "irp",
3955 .num_counters = 2,
3956 .num_boxes = 6,
3957 .perf_ctr_bits = 48,
3958 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3959 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
3960 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3961 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
3962 .msr_offset = SKX_IRP_MSR_OFFSET,
3963 .ops = &skx_uncore_iio_ops,
3964 .format_group = &skx_uncore_format_group,
3965 };
3966
3967 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3968 &format_attr_event.attr,
3969 &format_attr_umask.attr,
3970 &format_attr_edge.attr,
3971 &format_attr_inv.attr,
3972 &format_attr_thresh8.attr,
3973 &format_attr_occ_invert.attr,
3974 &format_attr_occ_edge_det.attr,
3975 &format_attr_filter_band0.attr,
3976 &format_attr_filter_band1.attr,
3977 &format_attr_filter_band2.attr,
3978 &format_attr_filter_band3.attr,
3979 NULL,
3980 };
3981
3982 static struct attribute_group skx_uncore_pcu_format_group = {
3983 .name = "format",
3984 .attrs = skx_uncore_pcu_formats_attr,
3985 };
3986
3987 static struct intel_uncore_ops skx_uncore_pcu_ops = {
3988 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3989 .hw_config = hswep_pcu_hw_config,
3990 .get_constraint = snbep_pcu_get_constraint,
3991 .put_constraint = snbep_pcu_put_constraint,
3992 };
3993
3994 static struct intel_uncore_type skx_uncore_pcu = {
3995 .name = "pcu",
3996 .num_counters = 4,
3997 .num_boxes = 1,
3998 .perf_ctr_bits = 48,
3999 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
4000 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
4001 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4002 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
4003 .num_shared_regs = 1,
4004 .ops = &skx_uncore_pcu_ops,
4005 .format_group = &skx_uncore_pcu_format_group,
4006 };
4007
4008 static struct intel_uncore_type *skx_msr_uncores[] = {
4009 &skx_uncore_ubox,
4010 &skx_uncore_chabox,
4011 &skx_uncore_iio,
4012 &skx_uncore_iio_free_running,
4013 &skx_uncore_irp,
4014 &skx_uncore_pcu,
4015 NULL,
4016 };
4017
4018 /*
4019 * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4020 * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4021 */
4022 #define SKX_CAPID6 0x9c
4023 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4024
4025 static int skx_count_chabox(void)
4026 {
4027 struct pci_dev *dev = NULL;
4028 u32 val = 0;
4029
4030 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4031 if (!dev)
4032 goto out;
4033
4034 pci_read_config_dword(dev, SKX_CAPID6, &val);
4035 val &= SKX_CHA_BIT_MASK;
4036 out:
4037 pci_dev_put(dev);
4038 return hweight32(val);
4039 }
4040
4041 void skx_uncore_cpu_init(void)
4042 {
4043 skx_uncore_chabox.num_boxes = skx_count_chabox();
4044 uncore_msr_uncores = skx_msr_uncores;
4045 }
4046
4047 static struct intel_uncore_type skx_uncore_imc = {
4048 .name = "imc",
4049 .num_counters = 4,
4050 .num_boxes = 6,
4051 .perf_ctr_bits = 48,
4052 .fixed_ctr_bits = 48,
4053 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4054 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4055 .event_descs = hswep_uncore_imc_events,
4056 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4057 .event_ctl = SNBEP_PCI_PMON_CTL0,
4058 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4059 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4060 .ops = &ivbep_uncore_pci_ops,
4061 .format_group = &skx_uncore_format_group,
4062 };
4063
4064 static struct attribute *skx_upi_uncore_formats_attr[] = {
4065 &format_attr_event.attr,
4066 &format_attr_umask_ext.attr,
4067 &format_attr_edge.attr,
4068 &format_attr_inv.attr,
4069 &format_attr_thresh8.attr,
4070 NULL,
4071 };
4072
4073 static const struct attribute_group skx_upi_uncore_format_group = {
4074 .name = "format",
4075 .attrs = skx_upi_uncore_formats_attr,
4076 };
4077
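/*
 * UPI event-control registers are laid out 8 bytes apart, hence the
 * UNCORE_BOX_FLAG_CTL_OFFS8 flag; init_box also writes the box-control
 * reset/freeze-enable bits.
 */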
4078 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4079 {
4080 struct pci_dev *pdev = box->pci_dev;
4081
4082 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4083 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4084 }
4085
4086 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4087 .init_box = skx_upi_uncore_pci_init_box,
4088 .disable_box = snbep_uncore_pci_disable_box,
4089 .enable_box = snbep_uncore_pci_enable_box,
4090 .disable_event = snbep_uncore_pci_disable_event,
4091 .enable_event = snbep_uncore_pci_enable_event,
4092 .read_counter = snbep_uncore_pci_read_counter,
4093 };
4094
4095 static struct intel_uncore_type skx_uncore_upi = {
4096 .name = "upi",
4097 .num_counters = 4,
4098 .num_boxes = 3,
4099 .perf_ctr_bits = 48,
4100 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
4101 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
4102 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4103 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4104 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
4105 .ops = &skx_upi_uncore_pci_ops,
4106 .format_group = &skx_upi_uncore_format_group,
4107 };
4108
4109 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4110 {
4111 struct pci_dev *pdev = box->pci_dev;
4112
4113 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4114 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4115 }
4116
4117 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4118 .init_box = skx_m2m_uncore_pci_init_box,
4119 .disable_box = snbep_uncore_pci_disable_box,
4120 .enable_box = snbep_uncore_pci_enable_box,
4121 .disable_event = snbep_uncore_pci_disable_event,
4122 .enable_event = snbep_uncore_pci_enable_event,
4123 .read_counter = snbep_uncore_pci_read_counter,
4124 };
4125
4126 static struct intel_uncore_type skx_uncore_m2m = {
4127 .name = "m2m",
4128 .num_counters = 4,
4129 .num_boxes = 2,
4130 .perf_ctr_bits = 48,
4131 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
4132 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
4133 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4134 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
4135 .ops = &skx_m2m_uncore_pci_ops,
4136 .format_group = &skx_uncore_format_group,
4137 };
4138
4139 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4140 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4141 EVENT_CONSTRAINT_END
4142 };
4143
4144 static struct intel_uncore_type skx_uncore_m2pcie = {
4145 .name = "m2pcie",
4146 .num_counters = 4,
4147 .num_boxes = 4,
4148 .perf_ctr_bits = 48,
4149 .constraints = skx_uncore_m2pcie_constraints,
4150 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4151 .event_ctl = SNBEP_PCI_PMON_CTL0,
4152 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4153 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4154 .ops = &ivbep_uncore_pci_ops,
4155 .format_group = &skx_uncore_format_group,
4156 };
4157
4158 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4159 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4160 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4161 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4162 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4163 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4164 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4165 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4166 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4167 EVENT_CONSTRAINT_END
4168 };
4169
4170 static struct intel_uncore_type skx_uncore_m3upi = {
4171 .name = "m3upi",
4172 .num_counters = 3,
4173 .num_boxes = 3,
4174 .perf_ctr_bits = 48,
4175 .constraints = skx_uncore_m3upi_constraints,
4176 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4177 .event_ctl = SNBEP_PCI_PMON_CTL0,
4178 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4179 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4180 .ops = &ivbep_uncore_pci_ops,
4181 .format_group = &skx_uncore_format_group,
4182 };
4183
4184 enum {
4185 SKX_PCI_UNCORE_IMC,
4186 SKX_PCI_UNCORE_M2M,
4187 SKX_PCI_UNCORE_UPI,
4188 SKX_PCI_UNCORE_M2PCIE,
4189 SKX_PCI_UNCORE_M3UPI,
4190 };
4191
4192 static struct intel_uncore_type *skx_pci_uncores[] = {
4193 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
4194 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
4195 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
4196 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
4197 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
4198 NULL,
4199 };
4200
4201 static const struct pci_device_id skx_uncore_pci_ids[] = {
4202 { /* MC0 Channel 0 */
4203 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4204 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4205 },
4206 { /* MC0 Channel 1 */
4207 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4208 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4209 },
4210 { /* MC0 Channel 2 */
4211 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4212 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4213 },
4214 { /* MC1 Channel 0 */
4215 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4216 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4217 },
4218 { /* MC1 Channel 1 */
4219 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4220 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4221 },
4222 { /* MC1 Channel 2 */
4223 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4224 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4225 },
4226 { /* M2M0 */
4227 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4228 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4229 },
4230 { /* M2M1 */
4231 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4232 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4233 },
4234 { /* UPI0 Link 0 */
4235 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4236 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4237 },
4238 { /* UPI0 Link 1 */
4239 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4240 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4241 },
4242 { /* UPI1 Link 2 */
4243 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4244 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4245 },
4246 { /* M2PCIe 0 */
4247 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4248 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4249 },
4250 { /* M2PCIe 1 */
4251 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4252 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4253 },
4254 { /* M2PCIe 2 */
4255 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4256 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4257 },
4258 { /* M2PCIe 3 */
4259 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4260 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4261 },
4262 { /* M3UPI0 Link 0 */
4263 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4264 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4265 },
4266 { /* M3UPI0 Link 1 */
4267 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4268 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4269 },
4270 { /* M3UPI1 Link 2 */
4271 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4272 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4273 },
4274 { /* end: all zeroes */ }
4275 };
4276
4277
4278 static struct pci_driver skx_uncore_pci_driver = {
4279 .name = "skx_uncore",
4280 .id_table = skx_uncore_pci_ids,
4281 };
4282
4283 int skx_uncore_pci_init(void)
4284 {
4285 /* need to double check pci address */
4286 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4287
4288 if (ret)
4289 return ret;
4290
4291 uncore_pci_uncores = skx_pci_uncores;
4292 uncore_pci_driver = &skx_uncore_pci_driver;
4293 return 0;
4294 }
4295
4296 /* end of SKX uncore support */
4297
4298 /* SNR uncore support */
4299
4300 static struct intel_uncore_type snr_uncore_ubox = {
4301 .name = "ubox",
4302 .num_counters = 2,
4303 .num_boxes = 1,
4304 .perf_ctr_bits = 48,
4305 .fixed_ctr_bits = 48,
4306 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4307 .event_ctl = SNR_U_MSR_PMON_CTL0,
4308 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4309 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4310 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4311 .ops = &ivbep_uncore_msr_ops,
4312 .format_group = &ivbep_uncore_format_group,
4313 };
4314
4315 static struct attribute *snr_uncore_cha_formats_attr[] = {
4316 &format_attr_event.attr,
4317 &format_attr_umask_ext2.attr,
4318 &format_attr_edge.attr,
4319 &format_attr_tid_en.attr,
4320 &format_attr_inv.attr,
4321 &format_attr_thresh8.attr,
4322 &format_attr_filter_tid5.attr,
4323 NULL,
4324 };
4325 static const struct attribute_group snr_uncore_chabox_format_group = {
4326 .name = "format",
4327 .attrs = snr_uncore_cha_formats_attr,
4328 };
4329
4330 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4331 {
4332 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4333
4334 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4335 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4336 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4337 reg1->idx = 0;
4338
4339 return 0;
4340 }
4341
4342 static void snr_cha_enable_event(struct intel_uncore_box *box,
4343 struct perf_event *event)
4344 {
4345 struct hw_perf_event *hwc = &event->hw;
4346 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4347
4348 if (reg1->idx != EXTRA_REG_NONE)
4349 wrmsrl(reg1->reg, reg1->config);
4350
4351 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4352 }
4353
4354 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4355 .init_box = ivbep_uncore_msr_init_box,
4356 .disable_box = snbep_uncore_msr_disable_box,
4357 .enable_box = snbep_uncore_msr_enable_box,
4358 .disable_event = snbep_uncore_msr_disable_event,
4359 .enable_event = snr_cha_enable_event,
4360 .read_counter = uncore_msr_read_counter,
4361 .hw_config = snr_cha_hw_config,
4362 };
4363
4364 static struct intel_uncore_type snr_uncore_chabox = {
4365 .name = "cha",
4366 .num_counters = 4,
4367 .num_boxes = 6,
4368 .perf_ctr_bits = 48,
4369 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4370 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4371 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4372 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4373 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4374 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4375 .ops = &snr_uncore_chabox_ops,
4376 .format_group = &snr_uncore_chabox_format_group,
4377 };
4378
4379 static struct attribute *snr_uncore_iio_formats_attr[] = {
4380 &format_attr_event.attr,
4381 &format_attr_umask.attr,
4382 &format_attr_edge.attr,
4383 &format_attr_inv.attr,
4384 &format_attr_thresh9.attr,
4385 &format_attr_ch_mask2.attr,
4386 &format_attr_fc_mask2.attr,
4387 NULL,
4388 };
4389
4390 static const struct attribute_group snr_uncore_iio_format_group = {
4391 .name = "format",
4392 .attrs = snr_uncore_iio_formats_attr,
4393 };
4394
4395 static struct intel_uncore_type snr_uncore_iio = {
4396 .name = "iio",
4397 .num_counters = 4,
4398 .num_boxes = 5,
4399 .perf_ctr_bits = 48,
4400 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4401 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4402 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4403 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4404 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4405 .msr_offset = SNR_IIO_MSR_OFFSET,
4406 .ops = &ivbep_uncore_msr_ops,
4407 .format_group = &snr_uncore_iio_format_group,
4408 };
4409
4410 static struct intel_uncore_type snr_uncore_irp = {
4411 .name = "irp",
4412 .num_counters = 2,
4413 .num_boxes = 5,
4414 .perf_ctr_bits = 48,
4415 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4416 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4417 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4418 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4419 .msr_offset = SNR_IRP_MSR_OFFSET,
4420 .ops = &ivbep_uncore_msr_ops,
4421 .format_group = &ivbep_uncore_format_group,
4422 };
4423
4424 static struct intel_uncore_type snr_uncore_m2pcie = {
4425 .name = "m2pcie",
4426 .num_counters = 4,
4427 .num_boxes = 5,
4428 .perf_ctr_bits = 48,
4429 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4430 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4431 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4432 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4433 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4434 .ops = &ivbep_uncore_msr_ops,
4435 .format_group = &ivbep_uncore_format_group,
4436 };
4437
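/*
 * PCU band events: event selects 0xb-0xe take a band threshold from
 * config1 through the shared PCU filter register; reg1->idx records
 * which band the event claimed.
 */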
4438 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4439 {
4440 struct hw_perf_event *hwc = &event->hw;
4441 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4442 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4443
4444 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4445 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4446 reg1->idx = ev_sel - 0xb;
4447 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4448 }
4449 return 0;
4450 }
4451
4452 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4453 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4454 .hw_config = snr_pcu_hw_config,
4455 .get_constraint = snbep_pcu_get_constraint,
4456 .put_constraint = snbep_pcu_put_constraint,
4457 };
4458
4459 static struct intel_uncore_type snr_uncore_pcu = {
4460 .name = "pcu",
4461 .num_counters = 4,
4462 .num_boxes = 1,
4463 .perf_ctr_bits = 48,
4464 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4465 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4466 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4467 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4468 .num_shared_regs = 1,
4469 .ops = &snr_uncore_pcu_ops,
4470 .format_group = &skx_uncore_pcu_format_group,
4471 };
4472
4473 enum perf_uncore_snr_iio_freerunning_type_id {
4474 SNR_IIO_MSR_IOCLK,
4475 SNR_IIO_MSR_BW_IN,
4476
4477 SNR_IIO_FREERUNNING_TYPE_MAX,
4478 };
4479
4480 static struct freerunning_counters snr_iio_freerunning[] = {
4481 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4482 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4483 };
4484
4485 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4486 /* Free-Running IIO CLOCKS Counter */
4487 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4488 /* Free-Running IIO BANDWIDTH IN Counters */
4489 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4490 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4491 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4492 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4493 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4494 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4495 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4496 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4497 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4498 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4499 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4500 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4501 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4502 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4503 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4504 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4505 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4506 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4507 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4508 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4509 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4510 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4511 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4512 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4513 { /* end: all zeroes */ },
4514 };
4515
4516 static struct intel_uncore_type snr_uncore_iio_free_running = {
4517 .name = "iio_free_running",
4518 .num_counters = 9,
4519 .num_boxes = 5,
4520 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4521 .freerunning = snr_iio_freerunning,
4522 .ops = &skx_uncore_iio_freerunning_ops,
4523 .event_descs = snr_uncore_iio_freerunning_events,
4524 .format_group = &skx_uncore_iio_freerunning_format_group,
4525 };
4526
4527 static struct intel_uncore_type *snr_msr_uncores[] = {
4528 &snr_uncore_ubox,
4529 &snr_uncore_chabox,
4530 &snr_uncore_iio,
4531 &snr_uncore_irp,
4532 &snr_uncore_m2pcie,
4533 &snr_uncore_pcu,
4534 &snr_uncore_iio_free_running,
4535 NULL,
4536 };
4537
4538 void snr_uncore_cpu_init(void)
4539 {
4540 uncore_msr_uncores = snr_msr_uncores;
4541 }
4542
4543 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4544 {
4545 struct pci_dev *pdev = box->pci_dev;
4546 int box_ctl = uncore_pci_box_ctl(box);
4547
4548 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4549 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4550 }
4551
4552 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4553 .init_box = snr_m2m_uncore_pci_init_box,
4554 .disable_box = snbep_uncore_pci_disable_box,
4555 .enable_box = snbep_uncore_pci_enable_box,
4556 .disable_event = snbep_uncore_pci_disable_event,
4557 .enable_event = snbep_uncore_pci_enable_event,
4558 .read_counter = snbep_uncore_pci_read_counter,
4559 };
4560
4561 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4562 &format_attr_event.attr,
4563 &format_attr_umask_ext3.attr,
4564 &format_attr_edge.attr,
4565 &format_attr_inv.attr,
4566 &format_attr_thresh8.attr,
4567 NULL,
4568 };
4569
4570 static const struct attribute_group snr_m2m_uncore_format_group = {
4571 .name = "format",
4572 .attrs = snr_m2m_uncore_formats_attr,
4573 };
4574
4575 static struct intel_uncore_type snr_uncore_m2m = {
4576 .name = "m2m",
4577 .num_counters = 4,
4578 .num_boxes = 1,
4579 .perf_ctr_bits = 48,
4580 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4581 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4582 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4583 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4584 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4585 .ops = &snr_m2m_uncore_pci_ops,
4586 .format_group = &snr_m2m_uncore_format_group,
4587 };
4588
4589 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4590 {
4591 struct pci_dev *pdev = box->pci_dev;
4592 struct hw_perf_event *hwc = &event->hw;
4593
4594 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4595 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4596 }
4597
4598 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4599 .init_box = snr_m2m_uncore_pci_init_box,
4600 .disable_box = snbep_uncore_pci_disable_box,
4601 .enable_box = snbep_uncore_pci_enable_box,
4602 .disable_event = snbep_uncore_pci_disable_event,
4603 .enable_event = snr_uncore_pci_enable_event,
4604 .read_counter = snbep_uncore_pci_read_counter,
4605 };
4606
4607 static struct intel_uncore_type snr_uncore_pcie3 = {
4608 .name = "pcie3",
4609 .num_counters = 4,
4610 .num_boxes = 1,
4611 .perf_ctr_bits = 48,
4612 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4613 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4614 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4615 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4616 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4617 .ops = &snr_pcie3_uncore_pci_ops,
4618 .format_group = &skx_uncore_iio_format_group,
4619 };
4620
4621 enum {
4622 SNR_PCI_UNCORE_M2M,
4623 SNR_PCI_UNCORE_PCIE3,
4624 };
4625
4626 static struct intel_uncore_type *snr_pci_uncores[] = {
4627 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4628 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
4629 NULL,
4630 };
4631
4632 static const struct pci_device_id snr_uncore_pci_ids[] = {
4633 { /* M2M */
4634 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4635 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4636 },
4637 { /* end: all zeroes */ }
4638 };
4639
4640 static struct pci_driver snr_uncore_pci_driver = {
4641 .name = "snr_uncore",
4642 .id_table = snr_uncore_pci_ids,
4643 };
4644
4645 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4646 { /* PCIe3 RP */
4647 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4648 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4649 },
4650 { /* end: all zeroes */ }
4651 };
4652
4653 static struct pci_driver snr_uncore_pci_sub_driver = {
4654 .name = "snr_uncore_sub",
4655 .id_table = snr_uncore_pci_sub_ids,
4656 };
4657
4658 int snr_uncore_pci_init(void)
4659 {
4660 /* SNR UBOX DID */
4661 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4662 SKX_GIDNIDMAP, true);
4663
4664 if (ret)
4665 return ret;
4666
4667 uncore_pci_uncores = snr_pci_uncores;
4668 uncore_pci_driver = &snr_uncore_pci_driver;
4669 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4670 return 0;
4671 }
4672
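/*
 * Walk all PCI devices with the SNR MC DID (0x3451) and return the one
 * whose bus maps to the requested logical package; the caller owns the
 * reference on the returned device.
 */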
4673 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4674 {
4675 struct pci_dev *mc_dev = NULL;
4676 int phys_id, pkg;
4677
4678 while (1) {
4679 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4680 if (!mc_dev)
4681 break;
4682 phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4683 if (phys_id < 0)
4684 continue;
4685 pkg = topology_phys_to_logical_pkg(phys_id);
4686 if (pkg < 0)
4687 continue;
4688 else if (pkg == id)
4689 break;
4690 }
4691 return mc_dev;
4692 }
4693
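/*
 * The PMON MMIO base is assembled from two config-space reads: the base
 * bits shifted up by 23 plus a per-MC memory offset shifted up by 12,
 * with box_ctl as the final byte offset into the mapping. E.g.
 * (hypothetical values) base 0x1 and mem0 0x2 give
 * (0x1 << 23) + (0x2 << 12) + box_ctl.
 */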
4694 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4695 unsigned int box_ctl, int mem_offset)
4696 {
4697 struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4698 struct intel_uncore_type *type = box->pmu->type;
4699 resource_size_t addr;
4700 u32 pci_dword;
4701
4702 if (!pdev)
4703 return;
4704
4705 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4706 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4707
4708 pci_read_config_dword(pdev, mem_offset, &pci_dword);
4709 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4710
4711 addr += box_ctl;
4712
4713 pci_dev_put(pdev);
4714
4715 box->io_addr = ioremap(addr, type->mmio_map_size);
4716 if (!box->io_addr) {
4717 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4718 return;
4719 }
4720
4721 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4722 }
4723
4724 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4725 {
4726 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4727 SNR_IMC_MMIO_MEM0_OFFSET);
4728 }
4729
4730 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4731 {
4732 u32 config;
4733
4734 if (!box->io_addr)
4735 return;
4736
4737 config = readl(box->io_addr);
4738 config |= SNBEP_PMON_BOX_CTL_FRZ;
4739 writel(config, box->io_addr);
4740 }
4741
4742 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4743 {
4744 u32 config;
4745
4746 if (!box->io_addr)
4747 return;
4748
4749 config = readl(box->io_addr);
4750 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4751 writel(config, box->io_addr);
4752 }
4753
4754 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4755 struct perf_event *event)
4756 {
4757 struct hw_perf_event *hwc = &event->hw;
4758
4759 if (!box->io_addr)
4760 return;
4761
4762 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4763 return;
4764
4765 writel(hwc->config | SNBEP_PMON_CTL_EN,
4766 box->io_addr + hwc->config_base);
4767 }
4768
4769 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4770 struct perf_event *event)
4771 {
4772 struct hw_perf_event *hwc = &event->hw;
4773
4774 if (!box->io_addr)
4775 return;
4776
4777 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4778 return;
4779
4780 writel(hwc->config, box->io_addr + hwc->config_base);
4781 }
4782
4783 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4784 .init_box = snr_uncore_mmio_init_box,
4785 .exit_box = uncore_mmio_exit_box,
4786 .disable_box = snr_uncore_mmio_disable_box,
4787 .enable_box = snr_uncore_mmio_enable_box,
4788 .disable_event = snr_uncore_mmio_disable_event,
4789 .enable_event = snr_uncore_mmio_enable_event,
4790 .read_counter = uncore_mmio_read_counter,
4791 };
4792
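/*
 * Note on the .scale entries below: 6.103515625e-5 is 64/2^20, consistent
 * with the CAS counters ticking once per 64-byte line and the scale
 * reporting MiB.
 */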
4793 static struct uncore_event_desc snr_uncore_imc_events[] = {
4794 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
4795 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4796 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4797 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4798 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4799 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4800 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4801 { /* end: all zeroes */ },
4802 };
4803
4804 static struct intel_uncore_type snr_uncore_imc = {
4805 .name = "imc",
4806 .num_counters = 4,
4807 .num_boxes = 2,
4808 .perf_ctr_bits = 48,
4809 .fixed_ctr_bits = 48,
4810 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4811 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4812 .event_descs = snr_uncore_imc_events,
4813 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4814 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4815 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4816 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4817 .mmio_offset = SNR_IMC_MMIO_OFFSET,
4818 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4819 .ops = &snr_uncore_mmio_ops,
4820 .format_group = &skx_uncore_format_group,
4821 };
4822
4823 enum perf_uncore_snr_imc_freerunning_type_id {
4824 SNR_IMC_DCLK,
4825 SNR_IMC_DDR,
4826
4827 SNR_IMC_FREERUNNING_TYPE_MAX,
4828 };
4829
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

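/*
 * CHA MSR blocks are not uniformly spaced on ICX, so the per-box
 * offset is looked up by pmu_idx instead of being computed from a
 * fixed stride.
 */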
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
	0x1c, 0x2a, 0x38, 0x46,
};

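/*
 * When tid_en is set in the event encoding, the extra register is
 * pointed at the box's FILTER0 MSR and loaded with the TID filter
 * value supplied in attr.config1.
 */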
static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tid_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tid_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box	= ivbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= snr_cha_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= icx_cha_hw_config,
};

static struct intel_uncore_type icx_uncore_chabox = {
	.name		= "cha",
	.num_counters	= 4,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr	= ICX_C34_MSR_PMON_CTR0,
	.box_ctl	= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_cha_msr_offsets,
	.event_mask	= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints	= skx_uncore_chabox_constraints,
	.ops		= &icx_uncore_chabox_ops,
	.format_group	= &snr_uncore_chabox_format_group,
};

static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_iio = {
	.name		= "iio",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr	= ICX_IIO_MSR_PMON_CTR0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_iio_constraints,
	.ops		= &skx_uncore_iio_ops,
	.format_group	= &snr_uncore_iio_format_group,
};

static struct intel_uncore_type icx_uncore_irp = {
	.name		= "irp",
	.num_counters	= 2,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr	= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

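/*
 * Unlike the SNR IMC tables above, these entries carry a sixth field:
 * a per-box offset array, since the IIO boxes are not evenly spaced.
 */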
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

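/*
 * Each bw_in_port* increment corresponds to 4 bytes of inbound
 * traffic, hence the 3.814697266e-6 (4 / 2^20) scale that converts
 * raw counts to MiB.
 */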
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * The number of CHAs is determined by reading the CAPID6 (low) and
 * CAPID7 (high) registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	/* Assemble the 64-bit capability mask from the two 32-bit registers. */
	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	/* Each set bit corresponds to one enabled CHA. */
	return hweight64(caps);
}

void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};

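/*
 * All boxes of a given type share one PCI device ID; each table entry
 * pins a specific device/function and maps it to an uncore type and
 * box index via UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx).
 */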
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

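/*
 * ICX has ICX_NUMBER_IMC_CHN channels per memory controller.  The
 * channel within a controller (pmu_idx % ICX_NUMBER_IMC_CHN) selects
 * the box control offset; the controller itself
 * (pmu_idx / ICX_NUMBER_IMC_CHN) selects the MMIO memory offset.
 */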
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
}

static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

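/*
 * Beyond the DCLK and DDR read/write counters inherited from SNR, ICX
 * adds a DDRT group, which counts read/write traffic on the DDR-T
 * (persistent memory) interface.
 */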
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

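/*
 * The free-running counters live in one block per memory controller
 * (not per channel), so box N maps the N-th controller's region at
 * SNR_IMC_MMIO_MEM0_OFFSET + N * ICX_IMC_MEM_STRIDE.
 */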
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */