// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

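/* Extract the i-th n-bit wide field of x, e.g. one packed refcount slice. */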
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID		0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)		(((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff

/* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

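/*
 * A box is stopped by setting the FRZ bit in its box control register,
 * which freezes all counters in the box at once; enabling the box simply
 * clears the bit again.
 */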
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

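/*
 * PCI config space is accessed in 32-bit dwords, so the wide PMON
 * counter value is assembled from two consecutive dword reads.
 */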
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

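/*
 * Each CAS event transfers one 64-byte cache line, so the 6.103515625e-5
 * scale factor (64 / 2^20) makes the scaled cas_count read out in MiB.
 */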
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

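/*
 * The Cbox filter register is shared by all events in the box.  Its (up
 * to) five fields are refcounted separately, each in a 6-bit slice of
 * er->ref: a field can be claimed if it is unused or already programmed
 * with an identical value, otherwise the event gets an empty constraint.
 */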
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

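/*
 * OR together the filter fields of every extra_regs entry matching this
 * event; a non-zero result means config1 must be routed to this Cbox's
 * filter register.
 */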
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

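/*
 * The PCU band filter packs four 8-bit thresholds into one register.
 * Move an event's filter byte from lane reg1->idx to @new_idx, and if
 * @modify also retarget the event select to the matching band event.
 */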
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

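/*
 * Event selects 0xb-0xe are the four frequency-band occupancy events;
 * each one owns the corresponding byte of the shared band filter.
 */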
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

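/*
 * SNB-EP pairs one Cbox with each physical core; never expose more Cbox
 * PMUs than the package actually has cores.
 */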
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

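/*
 * QPI event 0x38 supports packet match/mask filtering: config1 carries
 * the 64-bit match value and config2 the 64-bit mask.  The match/mask
 * registers live on a separate filter PCI device, which is programmed
 * when the event is enabled.
 */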
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
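/*
 * GIDNIDMAP packs eight 3-bit node IDs; the position of the entry that
 * matches this Ubox's CPUNODEID yields the package/die index used for
 * every bus behind the device.
 */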
snbep_pci2phy_map_init(int devid,int nodeid_loc,int idmap_loc,bool reverse)1378 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1379 {
1380 	struct pci_dev *ubox_dev = NULL;
1381 	int i, bus, nodeid, segment, die_id;
1382 	struct pci2phy_map *map;
1383 	int err = 0;
1384 	u32 config = 0;
1385 
1386 	while (1) {
1387 		/* find the UBOX device */
1388 		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1389 		if (!ubox_dev)
1390 			break;
1391 		bus = ubox_dev->bus->number;
1392 		/*
1393 		 * The nodeid and idmap registers only contain enough
1394 		 * information to handle 8 nodes.  On systems with more
1395 		 * than 8 nodes, we need to rely on NUMA information,
1396 		 * filled in from BIOS supplied information, to determine
1397 		 * the topology.
1398 		 */
1399 		if (nr_node_ids <= 8) {
1400 			/* get the Node ID of the local register */
1401 			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1402 			if (err)
1403 				break;
1404 			nodeid = config & NODE_ID_MASK;
1405 			/* get the Node ID mapping */
1406 			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1407 			if (err)
1408 				break;
1409 
1410 			segment = pci_domain_nr(ubox_dev->bus);
1411 			raw_spin_lock(&pci2phy_map_lock);
1412 			map = __find_pci2phy_map(segment);
1413 			if (!map) {
1414 				raw_spin_unlock(&pci2phy_map_lock);
1415 				err = -ENOMEM;
1416 				break;
1417 			}
1418 
1419 			/*
1420 			 * every three bits in the Node ID mapping register maps
1421 			 * to a particular node.
1422 			 */
1423 			for (i = 0; i < 8; i++) {
1424 				if (nodeid == ((config >> (3 * i)) & 0x7)) {
1425 					if (topology_max_die_per_package() > 1)
1426 						die_id = i;
1427 					else
1428 						die_id = topology_phys_to_logical_pkg(i);
1429 					if (die_id < 0)
1430 						die_id = -ENODEV;
1431 					map->pbus_to_dieid[bus] = die_id;
1432 					break;
1433 				}
1434 			}
1435 			raw_spin_unlock(&pci2phy_map_lock);
1436 		} else {
1437 			int node = pcibus_to_node(ubox_dev->bus);
1438 			int cpu;
1439 
1440 			segment = pci_domain_nr(ubox_dev->bus);
1441 			raw_spin_lock(&pci2phy_map_lock);
1442 			map = __find_pci2phy_map(segment);
1443 			if (!map) {
1444 				raw_spin_unlock(&pci2phy_map_lock);
1445 				err = -ENOMEM;
1446 				break;
1447 			}
1448 
1449 			die_id = -1;
1450 			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1451 				struct cpuinfo_x86 *c = &cpu_data(cpu);
1452 
1453 				if (c->initialized && cpu_to_node(cpu) == node) {
1454 					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1455 					break;
1456 				}
1457 			}
1458 			raw_spin_unlock(&pci2phy_map_lock);
1459 
1460 			if (WARN_ON_ONCE(die_id == -1)) {
1461 				err = -EINVAL;
1462 				break;
1463 			}
1464 		}
1465 	}
1466 
1467 	if (!err) {
1468 		/*
1469 		 * For PCI bus with no UBOX device, find the next bus
1470 		 * that has UBOX device and use its mapping.
1471 		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
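/*
 * Example of how the table above is consumed by ivbep_cbox_hw_config()
 * below: an event with (config & 0xffff) == 0x1134 matches the 0x4
 * entry, so idx gains the state-filter bit and config1 is masked down
 * to the state field of the box filter register.
 */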

static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}

static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};

void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}

static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
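/*
 * hwc->idx indexes these tables, so e.g. counter 2 is programmed through
 * config space offset 0xe0 and read back from 0xb8/0xbc; the counter
 * offsets are not evenly spaced, hence lookup tables instead of the
 * usual base + idx * 4 arithmetic.
 */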

static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};

static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */

/* KNL uncore support */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};

static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};

static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}

static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}

static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}

static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
							== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};

enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
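/*
 * Since the device ID alone is no longer unique, each entry below also
 * keys the instance by its fixed (device, function) slot via
 * UNCORE_PCI_DEV_FULL_DATA(); e.g. MC1 UClk is the 0x7841 device found
 * at device 11, function 0.
 */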

static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}

/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};

static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};

static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;
	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
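/*
 * SNBEP_PMON_BOX_CTL_INT sets bits 0, 1 and 16, so the loop below ends
 * up doing three cumulative writes (0x1, 0x3, 0x10003); each wrmsrl()
 * sets at most one new bit relative to the previous write.
 */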
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}

static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};

#define HSWEP_PCU_DID			0x2fc0
#define HSWEP_PCU_CAPID4_OFFSET		0x94
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)

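/*
 * CAPID4[7:6] holds the die "chop"; a chop of 0 identifies the small
 * (6-8 core) parts that expose only two SBOXes, see
 * hswep_uncore_cpu_init() below.
 */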
static bool hswep_has_limit_sbox(unsigned int device)
{
	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	u32 capid4;

	if (!dev)
		return false;

	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
	pci_dev_put(dev);
	if (!hswep_get_chop(capid4))
		return true;

	return false;
}

void hswep_uncore_cpu_init(void)
{
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}

static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
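/*
 * The cas_count .scale of 6.103515625e-5 is 64 / 2^20: each CAS
 * operation transfers one 64-byte cache line and the reported unit
 * is MiB.
 */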

static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3155 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3156 	},
3157 	{ /* R3QPI1 Link 2 */
3158 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3159 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3160 	},
3161 	{ /* QPI Port 0 filter  */
3162 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3163 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3164 						   SNBEP_PCI_QPI_PORT0_FILTER),
3165 	},
3166 	{ /* QPI Port 1 filter  */
3167 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3168 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3169 						   SNBEP_PCI_QPI_PORT1_FILTER),
3170 	},
3171 	{ /* end: all zeroes */ }
3172 };
3173 
3174 static struct pci_driver hswep_uncore_pci_driver = {
3175 	.name		= "hswep_uncore",
3176 	.id_table	= hswep_uncore_pci_ids,
3177 };
3178 
3179 int hswep_uncore_pci_init(void)
3180 {
3181 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3182 	if (ret)
3183 		return ret;
3184 	uncore_pci_uncores = hswep_pci_uncores;
3185 	uncore_pci_driver = &hswep_uncore_pci_driver;
3186 	return 0;
3187 }
3188 /* end of Haswell-EP uncore support */
3189 
3190 /* BDX uncore support */
3191 
3192 static struct intel_uncore_type bdx_uncore_ubox = {
3193 	.name			= "ubox",
3194 	.num_counters		= 2,
3195 	.num_boxes		= 1,
3196 	.perf_ctr_bits		= 48,
3197 	.fixed_ctr_bits		= 48,
3198 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3199 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3200 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3201 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3202 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3203 	.num_shared_regs	= 1,
3204 	.ops			= &ivbep_uncore_msr_ops,
3205 	.format_group		= &ivbep_uncore_ubox_format_group,
3206 };
3207 
3208 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3209 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3210 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3211 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3212 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3213 	EVENT_CONSTRAINT_END
3214 };
3215 
3216 static struct intel_uncore_type bdx_uncore_cbox = {
3217 	.name			= "cbox",
3218 	.num_counters		= 4,
3219 	.num_boxes		= 24,
3220 	.perf_ctr_bits		= 48,
3221 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3222 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3223 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3224 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3225 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3226 	.num_shared_regs	= 1,
3227 	.constraints		= bdx_uncore_cbox_constraints,
3228 	.ops			= &hswep_uncore_cbox_ops,
3229 	.format_group		= &hswep_uncore_cbox_format_group,
3230 };
3231 
3232 static struct intel_uncore_type bdx_uncore_sbox = {
3233 	.name			= "sbox",
3234 	.num_counters		= 4,
3235 	.num_boxes		= 4,
3236 	.perf_ctr_bits		= 48,
3237 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3238 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3239 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3240 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3241 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3242 	.ops			= &hswep_uncore_sbox_msr_ops,
3243 	.format_group		= &hswep_uncore_sbox_format_group,
3244 };
3245 
3246 #define BDX_MSR_UNCORE_SBOX	3
3247 
3248 static struct intel_uncore_type *bdx_msr_uncores[] = {
3249 	&bdx_uncore_ubox,
3250 	&bdx_uncore_cbox,
3251 	&hswep_uncore_pcu,
3252 	&bdx_uncore_sbox,
3253 	NULL,
3254 };
3255 
3256 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3257 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3258 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3259 	EVENT_CONSTRAINT_END
3260 };
3261 
3262 #define BDX_PCU_DID			0x6fc0
3263 
3264 void bdx_uncore_cpu_init(void)
3265 {
3266 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3267 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3268 	uncore_msr_uncores = bdx_msr_uncores;
3269 
3270 	/* Detect systems with no SBOXes */
3271 	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3272 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3273 
3274 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3275 }
3276 
3277 static struct intel_uncore_type bdx_uncore_ha = {
3278 	.name		= "ha",
3279 	.num_counters   = 4,
3280 	.num_boxes	= 2,
3281 	.perf_ctr_bits	= 48,
3282 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3283 };
3284 
3285 static struct intel_uncore_type bdx_uncore_imc = {
3286 	.name		= "imc",
3287 	.num_counters   = 4,
3288 	.num_boxes	= 8,
3289 	.perf_ctr_bits	= 48,
3290 	.fixed_ctr_bits	= 48,
3291 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3292 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3293 	.event_descs	= hswep_uncore_imc_events,
3294 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3295 };
3296 
3297 static struct intel_uncore_type bdx_uncore_irp = {
3298 	.name			= "irp",
3299 	.num_counters		= 4,
3300 	.num_boxes		= 1,
3301 	.perf_ctr_bits		= 48,
3302 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3303 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3304 	.ops			= &hswep_uncore_irp_ops,
3305 	.format_group		= &snbep_uncore_format_group,
3306 };
3307 
3308 static struct intel_uncore_type bdx_uncore_qpi = {
3309 	.name			= "qpi",
3310 	.num_counters		= 4,
3311 	.num_boxes		= 3,
3312 	.perf_ctr_bits		= 48,
3313 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3314 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3315 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3316 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3317 	.num_shared_regs	= 1,
3318 	.ops			= &snbep_uncore_qpi_ops,
3319 	.format_group		= &snbep_uncore_qpi_format_group,
3320 };
3321 
3322 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3323 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3324 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3325 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3326 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3327 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3328 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3329 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3330 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3331 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3332 	EVENT_CONSTRAINT_END
3333 };
3334 
3335 static struct intel_uncore_type bdx_uncore_r2pcie = {
3336 	.name		= "r2pcie",
3337 	.num_counters   = 4,
3338 	.num_boxes	= 1,
3339 	.perf_ctr_bits	= 48,
3340 	.constraints	= bdx_uncore_r2pcie_constraints,
3341 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3342 };
3343 
3344 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3345 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3346 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3347 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3348 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3349 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3350 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3351 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3352 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3353 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3354 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3355 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3356 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3357 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3358 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3359 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3360 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3361 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3362 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3363 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3364 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3365 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3366 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3367 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3368 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3369 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3370 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3371 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3372 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3373 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3374 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3375 	EVENT_CONSTRAINT_END
3376 };
3377 
3378 static struct intel_uncore_type bdx_uncore_r3qpi = {
3379 	.name		= "r3qpi",
3380 	.num_counters   = 3,
3381 	.num_boxes	= 3,
3382 	.perf_ctr_bits	= 48,
3383 	.constraints	= bdx_uncore_r3qpi_constraints,
3384 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3385 };
3386 
3387 enum {
3388 	BDX_PCI_UNCORE_HA,
3389 	BDX_PCI_UNCORE_IMC,
3390 	BDX_PCI_UNCORE_IRP,
3391 	BDX_PCI_UNCORE_QPI,
3392 	BDX_PCI_UNCORE_R2PCIE,
3393 	BDX_PCI_UNCORE_R3QPI,
3394 };
3395 
3396 static struct intel_uncore_type *bdx_pci_uncores[] = {
3397 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3398 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3399 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3400 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3401 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3402 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3403 	NULL,
3404 };
3405 
3406 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3407 	{ /* Home Agent 0 */
3408 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3409 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3410 	},
3411 	{ /* Home Agent 1 */
3412 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3413 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3414 	},
3415 	{ /* MC0 Channel 0 */
3416 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3417 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3418 	},
3419 	{ /* MC0 Channel 1 */
3420 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3421 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3422 	},
3423 	{ /* MC0 Channel 2 */
3424 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3425 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3426 	},
3427 	{ /* MC0 Channel 3 */
3428 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3429 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3430 	},
3431 	{ /* MC1 Channel 0 */
3432 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3433 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3434 	},
3435 	{ /* MC1 Channel 1 */
3436 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3437 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3438 	},
3439 	{ /* MC1 Channel 2 */
3440 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3441 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3442 	},
3443 	{ /* MC1 Channel 3 */
3444 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3445 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3446 	},
3447 	{ /* IRP */
3448 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3449 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3450 	},
3451 	{ /* QPI0 Port 0 */
3452 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3453 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3454 	},
3455 	{ /* QPI0 Port 1 */
3456 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3457 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3458 	},
3459 	{ /* QPI1 Port 2 */
3460 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3461 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3462 	},
3463 	{ /* R2PCIe */
3464 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3465 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3466 	},
3467 	{ /* R3QPI0 Link 0 */
3468 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3469 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3470 	},
3471 	{ /* R3QPI0 Link 1 */
3472 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3473 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3474 	},
3475 	{ /* R3QPI1 Link 2 */
3476 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3477 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3478 	},
3479 	{ /* QPI Port 0 filter  */
3480 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3481 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3482 						   SNBEP_PCI_QPI_PORT0_FILTER),
3483 	},
3484 	{ /* QPI Port 1 filter  */
3485 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3486 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3487 						   SNBEP_PCI_QPI_PORT1_FILTER),
3488 	},
3489 	{ /* QPI Port 2 filter  */
3490 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3491 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3492 						   BDX_PCI_QPI_PORT2_FILTER),
3493 	},
3494 	{ /* end: all zeroes */ }
3495 };
3496 
3497 static struct pci_driver bdx_uncore_pci_driver = {
3498 	.name		= "bdx_uncore",
3499 	.id_table	= bdx_uncore_pci_ids,
3500 };
3501 
3502 int bdx_uncore_pci_init(void)
3503 {
3504 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3505 
3506 	if (ret)
3507 		return ret;
3508 	uncore_pci_uncores = bdx_pci_uncores;
3509 	uncore_pci_driver = &bdx_uncore_pci_driver;
3510 	return 0;
3511 }
3512 
3513 /* end of BDX uncore support */
3514 
3515 /* SKX uncore support */
3516 
3517 static struct intel_uncore_type skx_uncore_ubox = {
3518 	.name			= "ubox",
3519 	.num_counters		= 2,
3520 	.num_boxes		= 1,
3521 	.perf_ctr_bits		= 48,
3522 	.fixed_ctr_bits		= 48,
3523 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3524 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3525 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3526 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3527 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3528 	.ops			= &ivbep_uncore_msr_ops,
3529 	.format_group		= &ivbep_uncore_ubox_format_group,
3530 };
3531 
3532 static struct attribute *skx_uncore_cha_formats_attr[] = {
3533 	&format_attr_event.attr,
3534 	&format_attr_umask.attr,
3535 	&format_attr_edge.attr,
3536 	&format_attr_tid_en.attr,
3537 	&format_attr_inv.attr,
3538 	&format_attr_thresh8.attr,
3539 	&format_attr_filter_tid4.attr,
3540 	&format_attr_filter_state5.attr,
3541 	&format_attr_filter_rem.attr,
3542 	&format_attr_filter_loc.attr,
3543 	&format_attr_filter_nm.attr,
3544 	&format_attr_filter_all_op.attr,
3545 	&format_attr_filter_not_nm.attr,
3546 	&format_attr_filter_opc_0.attr,
3547 	&format_attr_filter_opc_1.attr,
3548 	&format_attr_filter_nc.attr,
3549 	&format_attr_filter_isoc.attr,
3550 	NULL,
3551 };
3552 
3553 static const struct attribute_group skx_uncore_chabox_format_group = {
3554 	.name = "format",
3555 	.attrs = skx_uncore_cha_formats_attr,
3556 };
3557 
3558 static struct event_constraint skx_uncore_chabox_constraints[] = {
3559 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3560 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3561 	EVENT_CONSTRAINT_END
3562 };
3563 
3564 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3565 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3566 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3567 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3568 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3569 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3570 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3571 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3572 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3573 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3574 	EVENT_EXTRA_END
3575 };
3576 
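/*
 * Translate the filter-field bits used by the generic constraint code into
 * the SKX CHA filter register mask.  Field 0x8 covers the whole group of
 * locality/opcode/ISOC filters at once.
 */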
3577 static u64 skx_cha_filter_mask(int fields)
3578 {
3579 	u64 mask = 0;
3580 
3581 	if (fields & 0x1)
3582 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3583 	if (fields & 0x2)
3584 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3585 	if (fields & 0x4)
3586 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3587 	if (fields & 0x8) {
3588 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3589 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3590 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3591 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3592 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3593 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3594 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3595 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3596 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3597 	}
3598 	return mask;
3599 }
3600 
3601 static struct event_constraint *
3602 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3603 {
3604 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3605 }
3606 
3607 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3608 {
3609 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3610 	struct extra_reg *er;
3611 	int idx = 0;
3612 	/* Any of the CHA events may be filtered by Thread/Core-ID. */
3613 	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3614 		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3615 
3616 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3617 		if (er->event != (event->hw.config & er->config_mask))
3618 			continue;
3619 		idx |= er->idx;
3620 	}
3621 
3622 	if (idx) {
3623 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3624 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3625 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3626 		reg1->idx = idx;
3627 	}
3628 	return 0;
3629 }
3630 
3631 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3632 	/* There is no frz_en for chabox ctl */
3633 	.init_box		= ivbep_uncore_msr_init_box,
3634 	.disable_box		= snbep_uncore_msr_disable_box,
3635 	.enable_box		= snbep_uncore_msr_enable_box,
3636 	.disable_event		= snbep_uncore_msr_disable_event,
3637 	.enable_event		= hswep_cbox_enable_event,
3638 	.read_counter		= uncore_msr_read_counter,
3639 	.hw_config		= skx_cha_hw_config,
3640 	.get_constraint		= skx_cha_get_constraint,
3641 	.put_constraint		= snbep_cbox_put_constraint,
3642 };
3643 
3644 static struct intel_uncore_type skx_uncore_chabox = {
3645 	.name			= "cha",
3646 	.num_counters		= 4,
3647 	.perf_ctr_bits		= 48,
3648 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3649 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3650 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3651 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3652 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3653 	.num_shared_regs	= 1,
3654 	.constraints		= skx_uncore_chabox_constraints,
3655 	.ops			= &skx_uncore_chabox_ops,
3656 	.format_group		= &skx_uncore_chabox_format_group,
3657 };
3658 
3659 static struct attribute *skx_uncore_iio_formats_attr[] = {
3660 	&format_attr_event.attr,
3661 	&format_attr_umask.attr,
3662 	&format_attr_edge.attr,
3663 	&format_attr_inv.attr,
3664 	&format_attr_thresh9.attr,
3665 	&format_attr_ch_mask.attr,
3666 	&format_attr_fc_mask.attr,
3667 	NULL,
3668 };
3669 
3670 static const struct attribute_group skx_uncore_iio_format_group = {
3671 	.name = "format",
3672 	.attrs = skx_uncore_iio_formats_attr,
3673 };
3674 
3675 static struct event_constraint skx_uncore_iio_constraints[] = {
3676 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3677 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3678 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3679 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3680 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3681 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3682 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3683 	EVENT_CONSTRAINT_END
3684 };
3685 
3686 static void skx_iio_enable_event(struct intel_uncore_box *box,
3687 				 struct perf_event *event)
3688 {
3689 	struct hw_perf_event *hwc = &event->hw;
3690 
3691 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3692 }
3693 
3694 static struct intel_uncore_ops skx_uncore_iio_ops = {
3695 	.init_box		= ivbep_uncore_msr_init_box,
3696 	.disable_box		= snbep_uncore_msr_disable_box,
3697 	.enable_box		= snbep_uncore_msr_enable_box,
3698 	.disable_event		= snbep_uncore_msr_disable_event,
3699 	.enable_event		= skx_iio_enable_event,
3700 	.read_counter		= uncore_msr_read_counter,
3701 };
3702 
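/*
 * The per-die configuration value packs one root bus number per IIO stack,
 * BUS_NUM_STRIDE bits apart; return the slot that belongs to this PMU.
 */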
3703 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3704 {
3705 	return pmu->type->topology[die].configuration >>
3706 	       (pmu->pmu_idx * BUS_NUM_STRIDE);
3707 }
3708 
3709 static umode_t
3710 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3711 			 int die, int zero_bus_pmu)
3712 {
3713 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3714 
3715 	return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3716 }
3717 
3718 static umode_t
3719 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3720 {
3721 	/* Root bus 0x00 is valid only for pmu_idx = 0. */
3722 	return pmu_iio_mapping_visible(kobj, attr, die, 0);
3723 }
3724 
3725 static ssize_t skx_iio_mapping_show(struct device *dev,
3726 				    struct device_attribute *attr, char *buf)
3727 {
3728 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3729 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3730 	long die = (long)ea->var;
3731 
3732 	return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3733 					   skx_iio_stack(pmu, die));
3734 }
3735 
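/* Read the CPU bus-number MSR on @cpu; -ENXIO if it is absent or invalid. */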
3736 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3737 {
3738 	u64 msr_value;
3739 
3740 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3741 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3742 		return -ENXIO;
3743 
3744 	*topology = msr_value;
3745 
3746 	return 0;
3747 }
3748 
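/* Return the first online CPU of @die (CPU 0 if the die has none online). */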
3749 static int die_to_cpu(int die)
3750 {
3751 	int res = 0, cpu, current_die;
3752 	/*
3753 	 * Hold cpus_read_lock() so that no CPU can go offline while we walk
3754 	 * cpu_online_mask.
3755 	 */
3756 	cpus_read_lock();
3757 	for_each_online_cpu(cpu) {
3758 		current_die = topology_logical_die_id(cpu);
3759 		if (current_die == die) {
3760 			res = cpu;
3761 			break;
3762 		}
3763 	}
3764 	cpus_read_unlock();
3765 	return res;
3766 }
3767 
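/*
 * Cache the CPU bus numbers and the PCI segment for every die; the partially
 * built topology is freed again if any die cannot be resolved.
 */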
3768 static int skx_iio_get_topology(struct intel_uncore_type *type)
3769 {
3770 	int die, ret = -EPERM;
3771 
3772 	type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3773 				 GFP_KERNEL);
3774 	if (!type->topology)
3775 		return -ENOMEM;
3776 
3777 	for (die = 0; die < uncore_max_dies(); die++) {
3778 		ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3779 					   &type->topology[die].configuration);
3780 		if (ret)
3781 			break;
3782 
3783 		ret = uncore_die_to_segment(die);
3784 		if (ret < 0)
3785 			break;
3786 
3787 		type->topology[die].segment = ret;
3788 	}
3789 
3790 	if (ret < 0) {
3791 		kfree(type->topology);
3792 		type->topology = NULL;
3793 	}
3794 
3795 	return ret;
3796 }
3797 
3798 static struct attribute_group skx_iio_mapping_group = {
3799 	.is_visible	= skx_iio_mapping_visible,
3800 };
3801 
3802 static const struct attribute_group *skx_iio_attr_update[] = {
3803 	&skx_iio_mapping_group,
3804 	NULL,
3805 };
3806 
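/*
 * Remove @ag from the NULL-terminated @groups array by shifting the
 * remaining entries down over it.
 */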
3807 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3808 				   struct attribute_group *ag)
3809 {
3810 	int i;
3811 
3812 	for (i = 0; groups[i]; i++) {
3813 		if (groups[i] == ag) {
3814 			for (i++; groups[i]; i++)
3815 				groups[i - 1] = groups[i];
3816 			groups[i - 1] = NULL;
3817 			break;
3818 		}
3819 	}
3820 }
3821 
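/*
 * Create one read-only "dieN" sysfs attribute per die that reports the IIO
 * mapping for that die; on failure, everything allocated so far is undone
 * and the mapping group is dropped from attr_update.
 */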
3822 static int
3823 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3824 {
3825 	char buf[64];
3826 	int ret;
3827 	long die = -1;
3828 	struct attribute **attrs = NULL;
3829 	struct dev_ext_attribute *eas = NULL;
3830 
3831 	ret = type->get_topology(type);
3832 	if (ret < 0)
3833 		goto clear_attr_update;
3834 
3835 	ret = -ENOMEM;
3836 
3837 	/* One more for NULL. */
3838 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3839 	if (!attrs)
3840 		goto clear_topology;
3841 
3842 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3843 	if (!eas)
3844 		goto clear_attrs;
3845 
3846 	for (die = 0; die < uncore_max_dies(); die++) {
3847 		sprintf(buf, "die%ld", die);
3848 		sysfs_attr_init(&eas[die].attr.attr);
3849 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3850 		if (!eas[die].attr.attr.name)
3851 			goto err;
3852 		eas[die].attr.attr.mode = 0444;
3853 		eas[die].attr.show = skx_iio_mapping_show;
3854 		eas[die].attr.store = NULL;
3855 		eas[die].var = (void *)die;
3856 		attrs[die] = &eas[die].attr.attr;
3857 	}
3858 	ag->attrs = attrs;
3859 
3860 	return 0;
3861 err:
3862 	for (; die >= 0; die--)
3863 		kfree(eas[die].attr.attr.name);
3864 	kfree(eas);
3865 clear_attrs:
3866 	kfree(attrs);
3867 clear_topology:
3868 	kfree(type->topology);
3869 clear_attr_update:
3870 	pmu_clear_mapping_attr(type->attr_update, ag);
3871 	return ret;
3872 }
3873 
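/* Undo pmu_iio_set_mapping(): free the names, attributes and topology. */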
3874 static void
3875 pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3876 {
3877 	struct attribute **attr = ag->attrs;
3878 
3879 	if (!attr)
3880 		return;
3881 
3882 	for (; *attr; attr++)
3883 		kfree((*attr)->name);
3884 	kfree(attr_to_ext_attr(*ag->attrs));
3885 	kfree(ag->attrs);
3886 	ag->attrs = NULL;
3887 	kfree(type->topology);
3888 }
3889 
3890 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3891 {
3892 	return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
3893 }
3894 
3895 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3896 {
3897 	pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
3898 }
3899 
3900 static struct intel_uncore_type skx_uncore_iio = {
3901 	.name			= "iio",
3902 	.num_counters		= 4,
3903 	.num_boxes		= 6,
3904 	.perf_ctr_bits		= 48,
3905 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
3906 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
3907 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
3908 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3909 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
3910 	.msr_offset		= SKX_IIO_MSR_OFFSET,
3911 	.constraints		= skx_uncore_iio_constraints,
3912 	.ops			= &skx_uncore_iio_ops,
3913 	.format_group		= &skx_uncore_iio_format_group,
3914 	.attr_update		= skx_iio_attr_update,
3915 	.get_topology		= skx_iio_get_topology,
3916 	.set_mapping		= skx_iio_set_mapping,
3917 	.cleanup_mapping	= skx_iio_cleanup_mapping,
3918 };
3919 
3920 enum perf_uncore_iio_freerunning_type_id {
3921 	SKX_IIO_MSR_IOCLK			= 0,
3922 	SKX_IIO_MSR_BW				= 1,
3923 	SKX_IIO_MSR_UTIL			= 2,
3924 
3925 	SKX_IIO_FREERUNNING_TYPE_MAX,
3926 };
3927 
3928 
3929 static struct freerunning_counters skx_iio_freerunning[] = {
3930 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
3931 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
3932 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
3933 };
3934 
3935 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3936 	/* Free-Running IO CLOCKS Counter */
3937 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
3938 	/* Free-Running IIO BANDWIDTH Counters */
3939 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
3940 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
3941 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
3942 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
3943 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
3944 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
3945 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
3946 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
3947 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
3948 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
3949 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
3950 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
3951 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
3952 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
3953 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
3954 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
3955 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
3956 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
3957 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
3958 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
3959 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
3960 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
3961 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
3962 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
3963 	/* Free-running IIO UTILIZATION Counters */
3964 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
3965 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
3966 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
3967 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
3968 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
3969 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
3970 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
3971 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
3972 	{ /* end: all zeroes */ },
3973 };
3974 
3975 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3976 	.read_counter		= uncore_msr_read_counter,
3977 	.hw_config		= uncore_freerunning_hw_config,
3978 };
3979 
3980 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3981 	&format_attr_event.attr,
3982 	&format_attr_umask.attr,
3983 	NULL,
3984 };
3985 
3986 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3987 	.name = "format",
3988 	.attrs = skx_uncore_iio_freerunning_formats_attr,
3989 };
3990 
3991 static struct intel_uncore_type skx_uncore_iio_free_running = {
3992 	.name			= "iio_free_running",
3993 	.num_counters		= 17,
3994 	.num_boxes		= 6,
3995 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
3996 	.freerunning		= skx_iio_freerunning,
3997 	.ops			= &skx_uncore_iio_freerunning_ops,
3998 	.event_descs		= skx_uncore_iio_freerunning_events,
3999 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4000 };
4001 
4002 static struct attribute *skx_uncore_formats_attr[] = {
4003 	&format_attr_event.attr,
4004 	&format_attr_umask.attr,
4005 	&format_attr_edge.attr,
4006 	&format_attr_inv.attr,
4007 	&format_attr_thresh8.attr,
4008 	NULL,
4009 };
4010 
4011 static const struct attribute_group skx_uncore_format_group = {
4012 	.name = "format",
4013 	.attrs = skx_uncore_formats_attr,
4014 };
4015 
4016 static struct intel_uncore_type skx_uncore_irp = {
4017 	.name			= "irp",
4018 	.num_counters		= 2,
4019 	.num_boxes		= 6,
4020 	.perf_ctr_bits		= 48,
4021 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
4022 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
4023 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4024 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
4025 	.msr_offset		= SKX_IRP_MSR_OFFSET,
4026 	.ops			= &skx_uncore_iio_ops,
4027 	.format_group		= &skx_uncore_format_group,
4028 };
4029 
4030 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4031 	&format_attr_event.attr,
4032 	&format_attr_umask.attr,
4033 	&format_attr_edge.attr,
4034 	&format_attr_inv.attr,
4035 	&format_attr_thresh8.attr,
4036 	&format_attr_occ_invert.attr,
4037 	&format_attr_occ_edge_det.attr,
4038 	&format_attr_filter_band0.attr,
4039 	&format_attr_filter_band1.attr,
4040 	&format_attr_filter_band2.attr,
4041 	&format_attr_filter_band3.attr,
4042 	NULL,
4043 };
4044 
4045 static struct attribute_group skx_uncore_pcu_format_group = {
4046 	.name = "format",
4047 	.attrs = skx_uncore_pcu_formats_attr,
4048 };
4049 
4050 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4051 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4052 	.hw_config		= hswep_pcu_hw_config,
4053 	.get_constraint		= snbep_pcu_get_constraint,
4054 	.put_constraint		= snbep_pcu_put_constraint,
4055 };
4056 
4057 static struct intel_uncore_type skx_uncore_pcu = {
4058 	.name			= "pcu",
4059 	.num_counters		= 4,
4060 	.num_boxes		= 1,
4061 	.perf_ctr_bits		= 48,
4062 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
4063 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
4064 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4065 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
4066 	.num_shared_regs	= 1,
4067 	.ops			= &skx_uncore_pcu_ops,
4068 	.format_group		= &skx_uncore_pcu_format_group,
4069 };
4070 
4071 static struct intel_uncore_type *skx_msr_uncores[] = {
4072 	&skx_uncore_ubox,
4073 	&skx_uncore_chabox,
4074 	&skx_uncore_iio,
4075 	&skx_uncore_iio_free_running,
4076 	&skx_uncore_irp,
4077 	&skx_uncore_pcu,
4078 	NULL,
4079 };
4080 
4081 /*
4082  * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4083  * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4084  */
4085 #define SKX_CAPID6		0x9c
4086 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4087 
4088 static int skx_count_chabox(void)
4089 {
4090 	struct pci_dev *dev = NULL;
4091 	u32 val = 0;
4092 
4093 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4094 	if (!dev)
4095 		goto out;
4096 
4097 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4098 	val &= SKX_CHA_BIT_MASK;
4099 out:
4100 	pci_dev_put(dev);
4101 	return hweight32(val);
4102 }
4103 
4104 void skx_uncore_cpu_init(void)
4105 {
4106 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4107 	uncore_msr_uncores = skx_msr_uncores;
4108 }
4109 
4110 static struct intel_uncore_type skx_uncore_imc = {
4111 	.name		= "imc",
4112 	.num_counters   = 4,
4113 	.num_boxes	= 6,
4114 	.perf_ctr_bits	= 48,
4115 	.fixed_ctr_bits	= 48,
4116 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4117 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4118 	.event_descs	= hswep_uncore_imc_events,
4119 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4120 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4121 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4122 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4123 	.ops		= &ivbep_uncore_pci_ops,
4124 	.format_group	= &skx_uncore_format_group,
4125 };
4126 
4127 static struct attribute *skx_upi_uncore_formats_attr[] = {
4128 	&format_attr_event.attr,
4129 	&format_attr_umask_ext.attr,
4130 	&format_attr_edge.attr,
4131 	&format_attr_inv.attr,
4132 	&format_attr_thresh8.attr,
4133 	NULL,
4134 };
4135 
4136 static const struct attribute_group skx_upi_uncore_format_group = {
4137 	.name = "format",
4138 	.attrs = skx_upi_uncore_formats_attr,
4139 };
4140 
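/*
 * UPI event control registers are 8 bytes apart, hence the CTL_OFFS8 flag;
 * the box control is reset with the IvyBridge-EP style init value.
 */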
4141 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4142 {
4143 	struct pci_dev *pdev = box->pci_dev;
4144 
4145 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4146 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4147 }
4148 
4149 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4150 	.init_box	= skx_upi_uncore_pci_init_box,
4151 	.disable_box	= snbep_uncore_pci_disable_box,
4152 	.enable_box	= snbep_uncore_pci_enable_box,
4153 	.disable_event	= snbep_uncore_pci_disable_event,
4154 	.enable_event	= snbep_uncore_pci_enable_event,
4155 	.read_counter	= snbep_uncore_pci_read_counter,
4156 };
4157 
4158 static struct intel_uncore_type skx_uncore_upi = {
4159 	.name		= "upi",
4160 	.num_counters   = 4,
4161 	.num_boxes	= 3,
4162 	.perf_ctr_bits	= 48,
4163 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4164 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4165 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4166 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4167 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4168 	.ops		= &skx_upi_uncore_pci_ops,
4169 	.format_group	= &skx_upi_uncore_format_group,
4170 };
4171 
4172 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4173 {
4174 	struct pci_dev *pdev = box->pci_dev;
4175 
4176 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4177 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4178 }
4179 
4180 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4181 	.init_box	= skx_m2m_uncore_pci_init_box,
4182 	.disable_box	= snbep_uncore_pci_disable_box,
4183 	.enable_box	= snbep_uncore_pci_enable_box,
4184 	.disable_event	= snbep_uncore_pci_disable_event,
4185 	.enable_event	= snbep_uncore_pci_enable_event,
4186 	.read_counter	= snbep_uncore_pci_read_counter,
4187 };
4188 
4189 static struct intel_uncore_type skx_uncore_m2m = {
4190 	.name		= "m2m",
4191 	.num_counters   = 4,
4192 	.num_boxes	= 2,
4193 	.perf_ctr_bits	= 48,
4194 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4195 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4196 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4197 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4198 	.ops		= &skx_m2m_uncore_pci_ops,
4199 	.format_group	= &skx_uncore_format_group,
4200 };
4201 
4202 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4203 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4204 	EVENT_CONSTRAINT_END
4205 };
4206 
4207 static struct intel_uncore_type skx_uncore_m2pcie = {
4208 	.name		= "m2pcie",
4209 	.num_counters   = 4,
4210 	.num_boxes	= 4,
4211 	.perf_ctr_bits	= 48,
4212 	.constraints	= skx_uncore_m2pcie_constraints,
4213 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4214 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4215 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4216 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4217 	.ops		= &ivbep_uncore_pci_ops,
4218 	.format_group	= &skx_uncore_format_group,
4219 };
4220 
4221 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4222 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4223 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4224 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4225 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4226 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4227 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4228 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4229 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4230 	EVENT_CONSTRAINT_END
4231 };
4232 
4233 static struct intel_uncore_type skx_uncore_m3upi = {
4234 	.name		= "m3upi",
4235 	.num_counters   = 3,
4236 	.num_boxes	= 3,
4237 	.perf_ctr_bits	= 48,
4238 	.constraints	= skx_uncore_m3upi_constraints,
4239 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4240 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4241 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4242 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4243 	.ops		= &ivbep_uncore_pci_ops,
4244 	.format_group	= &skx_uncore_format_group,
4245 };
4246 
4247 enum {
4248 	SKX_PCI_UNCORE_IMC,
4249 	SKX_PCI_UNCORE_M2M,
4250 	SKX_PCI_UNCORE_UPI,
4251 	SKX_PCI_UNCORE_M2PCIE,
4252 	SKX_PCI_UNCORE_M3UPI,
4253 };
4254 
4255 static struct intel_uncore_type *skx_pci_uncores[] = {
4256 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4257 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4258 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4259 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4260 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4261 	NULL,
4262 };
4263 
4264 static const struct pci_device_id skx_uncore_pci_ids[] = {
4265 	{ /* MC0 Channel 0 */
4266 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4267 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4268 	},
4269 	{ /* MC0 Channel 1 */
4270 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4271 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4272 	},
4273 	{ /* MC0 Channel 2 */
4274 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4275 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4276 	},
4277 	{ /* MC1 Channel 0 */
4278 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4279 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4280 	},
4281 	{ /* MC1 Channel 1 */
4282 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4283 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4284 	},
4285 	{ /* MC1 Channel 2 */
4286 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4287 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4288 	},
4289 	{ /* M2M0 */
4290 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4291 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4292 	},
4293 	{ /* M2M1 */
4294 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4295 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4296 	},
4297 	{ /* UPI0 Link 0 */
4298 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4299 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4300 	},
4301 	{ /* UPI0 Link 1 */
4302 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4303 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4304 	},
4305 	{ /* UPI1 Link 2 */
4306 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4307 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4308 	},
4309 	{ /* M2PCIe 0 */
4310 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4311 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4312 	},
4313 	{ /* M2PCIe 1 */
4314 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4315 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4316 	},
4317 	{ /* M2PCIe 2 */
4318 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4319 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4320 	},
4321 	{ /* M2PCIe 3 */
4322 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4323 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4324 	},
4325 	{ /* M3UPI0 Link 0 */
4326 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4327 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4328 	},
4329 	{ /* M3UPI0 Link 1 */
4330 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4331 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4332 	},
4333 	{ /* M3UPI1 Link 2 */
4334 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4335 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4336 	},
4337 	{ /* end: all zeroes */ }
4338 };
4339 
4340 
4341 static struct pci_driver skx_uncore_pci_driver = {
4342 	.name		= "skx_uncore",
4343 	.id_table	= skx_uncore_pci_ids,
4344 };
4345 
4346 int skx_uncore_pci_init(void)
4347 {
4348 	/* need to double check pci address */
4349 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4350 
4351 	if (ret)
4352 		return ret;
4353 
4354 	uncore_pci_uncores = skx_pci_uncores;
4355 	uncore_pci_driver = &skx_uncore_pci_driver;
4356 	return 0;
4357 }
4358 
4359 /* end of SKX uncore support */
4360 
4361 /* SNR uncore support */
4362 
4363 static struct intel_uncore_type snr_uncore_ubox = {
4364 	.name			= "ubox",
4365 	.num_counters		= 2,
4366 	.num_boxes		= 1,
4367 	.perf_ctr_bits		= 48,
4368 	.fixed_ctr_bits		= 48,
4369 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4370 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4371 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4372 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4373 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4374 	.ops			= &ivbep_uncore_msr_ops,
4375 	.format_group		= &ivbep_uncore_format_group,
4376 };
4377 
4378 static struct attribute *snr_uncore_cha_formats_attr[] = {
4379 	&format_attr_event.attr,
4380 	&format_attr_umask_ext2.attr,
4381 	&format_attr_edge.attr,
4382 	&format_attr_tid_en.attr,
4383 	&format_attr_inv.attr,
4384 	&format_attr_thresh8.attr,
4385 	&format_attr_filter_tid5.attr,
4386 	NULL,
4387 };
4388 static const struct attribute_group snr_uncore_chabox_format_group = {
4389 	.name = "format",
4390 	.attrs = snr_uncore_cha_formats_attr,
4391 };
4392 
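/*
 * SNR CHA supports only the TID filter; it always sits in FILTER0 of the
 * box that belongs to this PMU instance.
 */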
4393 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4394 {
4395 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4396 
4397 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4398 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4399 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4400 	reg1->idx = 0;
4401 
4402 	return 0;
4403 }
4404 
4405 static void snr_cha_enable_event(struct intel_uncore_box *box,
4406 				   struct perf_event *event)
4407 {
4408 	struct hw_perf_event *hwc = &event->hw;
4409 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4410 
4411 	if (reg1->idx != EXTRA_REG_NONE)
4412 		wrmsrl(reg1->reg, reg1->config);
4413 
4414 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4415 }
4416 
4417 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4418 	.init_box		= ivbep_uncore_msr_init_box,
4419 	.disable_box		= snbep_uncore_msr_disable_box,
4420 	.enable_box		= snbep_uncore_msr_enable_box,
4421 	.disable_event		= snbep_uncore_msr_disable_event,
4422 	.enable_event		= snr_cha_enable_event,
4423 	.read_counter		= uncore_msr_read_counter,
4424 	.hw_config		= snr_cha_hw_config,
4425 };
4426 
4427 static struct intel_uncore_type snr_uncore_chabox = {
4428 	.name			= "cha",
4429 	.num_counters		= 4,
4430 	.num_boxes		= 6,
4431 	.perf_ctr_bits		= 48,
4432 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4433 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4434 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4435 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4436 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4437 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4438 	.ops			= &snr_uncore_chabox_ops,
4439 	.format_group		= &snr_uncore_chabox_format_group,
4440 };
4441 
4442 static struct attribute *snr_uncore_iio_formats_attr[] = {
4443 	&format_attr_event.attr,
4444 	&format_attr_umask.attr,
4445 	&format_attr_edge.attr,
4446 	&format_attr_inv.attr,
4447 	&format_attr_thresh9.attr,
4448 	&format_attr_ch_mask2.attr,
4449 	&format_attr_fc_mask2.attr,
4450 	NULL,
4451 };
4452 
4453 static const struct attribute_group snr_uncore_iio_format_group = {
4454 	.name = "format",
4455 	.attrs = snr_uncore_iio_formats_attr,
4456 };
4457 
4458 static umode_t
4459 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4460 {
4461 	/* Root bus 0x00 is valid only for pmu_idx = 1. */
4462 	return pmu_iio_mapping_visible(kobj, attr, die, 1);
4463 }
4464 
4465 static struct attribute_group snr_iio_mapping_group = {
4466 	.is_visible	= snr_iio_mapping_visible,
4467 };
4468 
4469 static const struct attribute_group *snr_iio_attr_update[] = {
4470 	&snr_iio_mapping_group,
4471 	NULL,
4472 };
4473 
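/*
 * Walk all Mesh2IIO devices and record, per die, the bus number and PCI
 * segment of every IIO stack, converting the SAD_CONTROL_CFG stack id to
 * PMON notation via @sad_pmon_mapping.
 */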
4474 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4475 {
4476 	u32 sad_cfg;
4477 	int die, stack_id, ret = -EPERM;
4478 	struct pci_dev *dev = NULL;
4479 
4480 	type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4481 				 GFP_KERNEL);
4482 	if (!type->topology)
4483 		return -ENOMEM;
4484 
4485 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4486 		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4487 		if (ret) {
4488 			ret = pcibios_err_to_errno(ret);
4489 			break;
4490 		}
4491 
4492 		die = uncore_pcibus_to_dieid(dev->bus);
4493 		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4494 		if (die < 0 || stack_id >= type->num_boxes) {
4495 			ret = -EPERM;
4496 			break;
4497 		}
4498 
4499 		/* Convert stack id from SAD_CONTROL to PMON notation. */
4500 		stack_id = sad_pmon_mapping[stack_id];
4501 
4502 		((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4503 		type->topology[die].segment = pci_domain_nr(dev->bus);
4504 	}
4505 
4506 	if (ret) {
4507 		kfree(type->topology);
4508 		type->topology = NULL;
4509 	}
4510 
4511 	pci_dev_put(dev);
4512 
4513 	return ret;
4514 }
4515 
4516 /*
4517  * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG to PMON notation.
4518  */
4519 enum {
4520 	SNR_QAT_PMON_ID,
4521 	SNR_CBDMA_DMI_PMON_ID,
4522 	SNR_NIS_PMON_ID,
4523 	SNR_DLB_PMON_ID,
4524 	SNR_PCIE_GEN3_PMON_ID
4525 };
4526 
4527 static u8 snr_sad_pmon_mapping[] = {
4528 	SNR_CBDMA_DMI_PMON_ID,
4529 	SNR_PCIE_GEN3_PMON_ID,
4530 	SNR_DLB_PMON_ID,
4531 	SNR_NIS_PMON_ID,
4532 	SNR_QAT_PMON_ID
4533 };
4534 
4535 static int snr_iio_get_topology(struct intel_uncore_type *type)
4536 {
4537 	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4538 }
4539 
4540 static int snr_iio_set_mapping(struct intel_uncore_type *type)
4541 {
4542 	return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4543 }
4544 
4545 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4546 {
4547 	pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
4548 }
4549 
4550 static struct event_constraint snr_uncore_iio_constraints[] = {
4551 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4552 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4553 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
4554 	EVENT_CONSTRAINT_END
4555 };
4556 
4557 static struct intel_uncore_type snr_uncore_iio = {
4558 	.name			= "iio",
4559 	.num_counters		= 4,
4560 	.num_boxes		= 5,
4561 	.perf_ctr_bits		= 48,
4562 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4563 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4564 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4565 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4566 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4567 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4568 	.constraints		= snr_uncore_iio_constraints,
4569 	.ops			= &ivbep_uncore_msr_ops,
4570 	.format_group		= &snr_uncore_iio_format_group,
4571 	.attr_update		= snr_iio_attr_update,
4572 	.get_topology		= snr_iio_get_topology,
4573 	.set_mapping		= snr_iio_set_mapping,
4574 	.cleanup_mapping	= snr_iio_cleanup_mapping,
4575 };
4576 
4577 static struct intel_uncore_type snr_uncore_irp = {
4578 	.name			= "irp",
4579 	.num_counters		= 2,
4580 	.num_boxes		= 5,
4581 	.perf_ctr_bits		= 48,
4582 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4583 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4584 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4585 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4586 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4587 	.ops			= &ivbep_uncore_msr_ops,
4588 	.format_group		= &ivbep_uncore_format_group,
4589 };
4590 
4591 static struct intel_uncore_type snr_uncore_m2pcie = {
4592 	.name		= "m2pcie",
4593 	.num_counters	= 4,
4594 	.num_boxes	= 5,
4595 	.perf_ctr_bits	= 48,
4596 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4597 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4598 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4599 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4600 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4601 	.ops		= &ivbep_uncore_msr_ops,
4602 	.format_group	= &ivbep_uncore_format_group,
4603 };
4604 
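/*
 * PCU events 0xb-0xe take an extra filter value from config1, programmed
 * into SNR_PCU_MSR_PMON_BOX_FILTER.
 */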
4605 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4606 {
4607 	struct hw_perf_event *hwc = &event->hw;
4608 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4609 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4610 
4611 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4612 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4613 		reg1->idx = ev_sel - 0xb;
4614 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4615 	}
4616 	return 0;
4617 }

static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};

enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}

static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

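/*
 * The PCIe3 event control carries config bits above bit 31 (extended
 * umask), so it is programmed as two 32-bit config-space writes; the
 * enable bit lives in the low dword.
 */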
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}

static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};

enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};

static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};

int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}

#define SNR_MC_DEVICE_ID	0x3451

static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
{
	struct pci_dev *mc_dev = NULL;
	int pkg;

	while (1) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
		if (!mc_dev)
			break;
		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
		if (pkg == id)
			break;
	}
	return mc_dev;
}

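/*
 * The IMC PMON registers are MMIO-mapped.  The base address is pieced
 * together from two config-space dwords of the memory controller device:
 * the MMIO base (shifted up by 23 bits) plus the per-controller MEM
 * offset (shifted up by 12 bits), with box_ctl added on top.
 */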
static int snr_uncore_mmio_map(struct intel_uncore_box *box,
			       unsigned int box_ctl, int mem_offset,
			       unsigned int device)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	pci_dev_put(pdev);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return -EINVAL;
	}

	return 0;
}

static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset,
				       unsigned int device)
{
	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET,
				   SNR_MC_DEVICE_ID);
}

static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}

static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};

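/*
 * Note: when TID_EN is set in the event config, the TID filter value is
 * staged from attr.config1 here and written to the per-CHA FILTER0 MSR
 * (ICX_C34_MSR_PMON_BOX_FILTER0 plus the CHA's MSR offset) when the
 * event is enabled.
 */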
static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tie_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};

static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};

/*
 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 */
enum {
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
	ICX_CBDMA_DMI_PMON_ID
};

static u8 icx_sad_pmon_mapping[] = {
	ICX_CBDMA_DMI_PMON_ID,
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
};

static int icx_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
}

static int icx_iio_set_mapping(struct intel_uncore_type *type)
{
	/* Detect ICX-D system. This case is not supported */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
		return -EPERM;
	}
	return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}

static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
}

static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= icx_iio_cleanup_mapping,
};

static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
 * registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

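/*
 * Note: each set bit in the combined 64-bit CAPID6/CAPID7 value
 * presumably enables one CHA, so the population count below yields
 * the number of CHA boxes.
 */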
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}

void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

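/*
 * pmu_idx enumerates IMC channels across all memory controllers:
 * pmu_idx % ICX_NUMBER_IMC_CHN selects the channel (each channel's PMON
 * block is mmio_offset further in), and pmu_idx / ICX_NUMBER_IMC_CHN
 * selects the controller (controllers are ICX_IMC_MEM_STRIDE apart).
 */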
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
				   SNR_MC_DEVICE_ID);
}

static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SNR_MC_DEVICE_ID);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */

/* SPR uncore support */

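/*
 * Note: the SPR enable/disable callbacks also program the event's extra
 * (filter) register, when one is in use, before touching the event
 * control MSR itself.
 */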
static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config);
}

static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, 0);

	wrmsrl(hwc->config_base, 0);
}

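/*
 * Note: as on ICX, a TID-filtered CHA event stages its filter value from
 * attr.config1.  The FILTER0 MSRs are spaced HSWEP_CBO_MSR_OFFSET apart
 * and indexed by the box id recorded in type->box_ids.
 */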
static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
	struct intel_uncore_type *type = box->pmu->type;

	if (tie_en) {
		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= spr_uncore_msr_disable_event,
	.enable_event		= spr_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= spr_cha_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};

static ssize_t alias_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	char pmu_name[UNCORE_PMU_NAME_LEN];

	uncore_get_alias_name(pmu_name, pmu);
	return sysfs_emit(buf, "%s\n", pmu_name);
}

static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
	&dev_attr_alias.attr,
	NULL
};

ATTRIBUTE_GROUPS(uncore_alias);

static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};

static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name			= "format",
	.attrs			= spr_uncore_raw_formats_attr,
};

#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",
};

static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};

static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};

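/*
 * Fixed counters have no event select; their control register only
 * needs the enable bit, so don't write the event config there.
 */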
static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (uncore_pmc_fixed(hwc->idx))
		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
	else
		writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops spr_uncore_mmio_ops = {
	.init_box		= intel_generic_uncore_mmio_init_box,
	.exit_box		= uncore_mmio_exit_box,
	.disable_box		= intel_generic_uncore_mmio_disable_box,
	.enable_box		= intel_generic_uncore_mmio_enable_box,
	.disable_event		= intel_generic_uncore_mmio_disable_event,
	.enable_event		= spr_uncore_mmio_enable_event,
	.read_counter		= uncore_mmio_read_counter,
};

static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.ops			= &spr_uncore_mmio_ops,
};

static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
}

static struct intel_uncore_ops spr_uncore_pci_ops = {
	.init_box		= intel_generic_uncore_pci_init_box,
	.disable_box		= intel_generic_uncore_pci_disable_box,
	.enable_box		= intel_generic_uncore_pci_enable_box,
	.disable_event		= intel_generic_uncore_pci_disable_event,
	.enable_event		= spr_uncore_pci_enable_event,
	.read_counter		= intel_generic_uncore_pci_read_counter,
};

#define SPR_UNCORE_PCI_COMMON_FORMAT()			\
	SPR_UNCORE_COMMON_FORMAT(),			\
	.ops			= &spr_uncore_pci_ops

static struct intel_uncore_type spr_uncore_m2m = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m2m",
};

static struct intel_uncore_type spr_uncore_upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "upi",
};

static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
	.constraints		= icx_uncore_m3upi_constraints,
};

static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};

#define UNCORE_SPR_NUM_UNCORE_TYPES		12
#define UNCORE_SPR_CHA				0
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6

static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	&spr_uncore_m2pcie,
	&spr_uncore_pcu,
	NULL,
	&spr_uncore_imc,
	&spr_uncore_m2m,
	&spr_uncore_upi,
	&spr_uncore_m3upi,
	NULL,
	&spr_uncore_mdf,
};

enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= spr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};

#define SPR_MC_DEVICE_ID	0x3251

static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}

static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

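/*
 * The SPR types above only spell out what differs from the discovery
 * table defaults; copy each customized field over the generated type,
 * leaving unset fields at their discovered values.
 */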
static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
					struct intel_uncore_type *from_type)
{
	if (!to_type || !from_type)
		return;

	if (from_type->name)
		to_type->name = from_type->name;
	if (from_type->fixed_ctr_bits)
		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
	if (from_type->event_mask)
		to_type->event_mask = from_type->event_mask;
	if (from_type->event_mask_ext)
		to_type->event_mask_ext = from_type->event_mask_ext;
	if (from_type->fixed_ctr)
		to_type->fixed_ctr = from_type->fixed_ctr;
	if (from_type->fixed_ctl)
		to_type->fixed_ctl = from_type->fixed_ctl;
	if (from_type->num_shared_regs)
		to_type->num_shared_regs = from_type->num_shared_regs;
	if (from_type->constraints)
		to_type->constraints = from_type->constraints;
	if (from_type->ops)
		to_type->ops = from_type->ops;
	if (from_type->event_descs)
		to_type->event_descs = from_type->event_descs;
	if (from_type->format_group)
		to_type->format_group = from_type->format_group;
	if (from_type->attr_update)
		to_type->attr_update = from_type->attr_update;
}

static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		    struct intel_uncore_type **extra)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
			continue;
		uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
	for (; *types; types++) {
		if (type_id == (*types)->type_id)
			return *types;
	}

	return NULL;
}

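/*
 * Note: box ids from the discovery table may be sparse, so return the
 * highest id plus one; callers size per-box resources by id rather
 * than by the number of discovered boxes.
 */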
static int uncore_type_max_boxes(struct intel_uncore_type **types,
				 int type_id)
{
	struct intel_uncore_type *type;
	int i, max = 0;

	type = uncore_find_type_by_id(types, type_id);
	if (!type)
		return 0;

	for (i = 0; i < type->num_boxes; i++) {
		if (type->box_ids[i] > max)
			max = type->box_ids[i];
	}

	return max + 1;
}

#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		/*
		 * The value from the discovery table (stored in type->num_boxes
		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because
		 * of a firmware bug.  Use the value from SPR_MSR_UNC_CBO_CONFIG
		 * to replace it.
		 */
		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		/*
		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
		 */
		if (num_cbo)
			type->num_boxes = num_cbo;
	}
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

int spr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
	return 0;
}

void spr_uncore_mmio_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);

	if (ret)
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
	else {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores);

		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
	}
}

/* end of SPR uncore support */
