/* Nehalem-EX/Westmere-EX uncore support */
#include "perf_event_intel_uncore.h"

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_THRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_THRESH_MASK)
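
/*
 * Illustrative example (not in the original source): a raw config that
 * selects event 0x02 with umask 0x01 and edge detect would encode as
 *
 *	0x02				// event select, bits 0-7
 *	| (0x01 << 8)			// umask, bits 8-15
 *	| NHMEX_PMON_CTL_EDGE_DET	// bit 18
 *	== 0x00040102
 */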

/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)

/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL		0xd00
#define NHMEX_C0_MSR_PMON_CTR0			0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0		0xd10
#define NHMEX_C_MSR_OFFSET			0x20

/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL		0xc20
#define NHMEX_B0_MSR_PMON_CTR0			0xc31
#define NHMEX_B0_MSR_PMON_CTL0			0xc30
#define NHMEX_B_MSR_OFFSET			0x40
#define NHMEX_B0_MSR_MATCH			0xe45
#define NHMEX_B0_MSR_MASK			0xe46
#define NHMEX_B1_MSR_MATCH			0xe4d
#define NHMEX_B1_MSR_MASK			0xe4e

#define NHMEX_B_PMON_CTL_EN			(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT		6
#define NHMEX_B_PMON_CTR_MASK		\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK		\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)
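
/*
 * Illustrative example (not in the original source): a Bbox control value
 * selecting counter 2 and event 0x4 would encode as
 *
 *	(0x2 << NHMEX_B_PMON_CTR_SHIFT) |	// counter field, bits 6-7
 *	(0x4 << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)	// event select, bits 1-5
 *	== 0x88
 */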

/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL		0xc40
#define NHMEX_S0_MSR_PMON_CTR0			0xc51
#define NHMEX_S0_MSR_PMON_CTL0			0xc50
#define NHMEX_S_MSR_OFFSET			0x80
#define NHMEX_S0_MSR_MM_CFG			0xe48
#define NHMEX_S0_MSR_MATCH			0xe49
#define NHMEX_S0_MSR_MASK			0xe4a
#define NHMEX_S1_MSR_MM_CFG			0xe58
#define NHMEX_S1_MSR_MATCH			0xe59
#define NHMEX_S1_MSR_MASK			0xe5a

#define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63)
#define NHMEX_S_EVENT_TO_R_PROG_EV		0

/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL			0xca0
#define NHMEX_M0_MSR_PMU_DSP			0xca5
#define NHMEX_M0_MSR_PMU_ISS			0xca6
#define NHMEX_M0_MSR_PMU_MAP			0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR		0xca8
#define NHMEX_M0_MSR_PMU_PGT			0xca9
#define NHMEX_M0_MSR_PMU_PLD			0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC		0xcab
#define NHMEX_M0_MSR_PMU_CTL0			0xcb0
#define NHMEX_M0_MSR_PMU_CNT0			0xcb1
#define NHMEX_M_MSR_OFFSET			0x40
#define NHMEX_M0_MSR_PMU_MM_CFG			0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG			0xe5c

#define NHMEX_M_PMON_MM_CFG_EN			(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK		0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK		0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT		34
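
/*
 * Illustrative sketch (not in the original source): the Mbox address
 * filter travels in config2 as one 64-bit value:
 *
 *	config2 = NHMEX_M_PMON_MM_CFG_EN			// enable, bit 63
 *		| (addr_mask << NHMEX_M_PMON_ADDR_MASK_SHIFT)	// bits 34-60
 *		| (addr_match & NHMEX_M_PMON_ADDR_MATCH_MASK);	// bits 0-33
 */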

#define NHMEX_M_PMON_CTL_EN			(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)

#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))
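
/*
 * Illustrative layout (not in the original source): the per-event FVC
 * fields are 3 bits wide. On NHM-EX, field n occupies bits 11+3n..13+3n
 * (n=0 -> bits 11-13, n=3 -> bits 20-22); on WSM-EX everything shifts up
 * by one bit (n=0 -> bits 12-14), which is why the two mask sets differ.
 */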

/*
 * Use bits 9~13 to select the event if the 7th bit (FLAG_MODE) is not
 * set; otherwise use bits 19~21 to select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)
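
/*
 * Worked example (illustrative, not in the original source):
 * MBOX_INC_SEL(0xd) == 0xd << 9 == 0x1a00, while
 * MBOX_SET_FLAG_SEL(0x5) == (0x5 << 19) | FLAG_MODE == 0x280080.
 * An inc_sel encoding never sets FLAG_MODE and a set_flag_sel encoding
 * always does, which is how the config_mask matching tells them apart.
 */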

/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL			0xe00
#define NHMEX_R_MSR_PMON_CTL0			0xe10
#define NHMEX_R_MSR_PMON_CNT0			0xe11
#define NHMEX_R_MSR_OFFSET			0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n)		\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)		(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)		(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)		\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
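
/*
 * Worked addresses (illustrative, not in the original source):
 * port 0: QLX_CFG = 0xe0c, XBR_SET1_MM_CFG/MATCH/MASK = 0xe60/0xe61/0xe62;
 * port 4: QLX_CFG = 0xe2c, XBR_SET1_MM_CFG = 0xe60 + 0x10 + 4*4 = 0xe80.
 * The "+ 0x10" gap keeps SET1 of ports 4-7 clear of SET2 of ports 0-3,
 * whose last register is XBR_SET2_MASK(3) = 0xe7e.
 */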

#define NHMEX_R_PMON_CTL_EN			(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN			(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK		NHMEX_R_PMON_CTL_EV_SEL_MASK

/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL			0xc80
#define NHMEX_W_MSR_PMON_CNT0			0xc90
#define NHMEX_W_MSR_PMON_EVT_SEL0		0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR		0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL		0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN		(1ULL << 31)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
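
/*
 * Illustrative example (not in the original source): __BITS_VALUE()
 * extracts the i-th n-bit field of x, so __BITS_VALUE(0x0403, 1, 8) == 0x04
 * and __BITS_VALUE(0x0403, 0, 8) == 0x03. The mbox/rbox code below uses it
 * to pack several small indices or refcounts into one scalar.
 */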

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};
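
/*
 * Illustrative (not in the original source): each entry above is added to
 * the C0 base MSRs, so e.g. cbox 2 (offset 0x40) has its box control at
 * 0xd40 and its first event-select/counter pair at 0xd50/0xd51.
 */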

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}
	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select the counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0x00, 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
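
/*
 * Reading the table (illustrative, not in the original source): each entry
 * matches on config bits 6-7 (mask 0xc0) and grants exactly one counter,
 * e.g. EVENT_CONSTRAINT(0x40, 2, 0xc0) steers events with counter field 1
 * onto counter 1 (counter bitmask 0x2).
 */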

static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};

static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}
	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
	.name			= "format",
	.attrs			= nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};

enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTRA_REG(0x0, DSP),
	MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTRA_REG(0xa, ISS),
	MBOX_INC_SEL_EXTRA_REG(0xa, PLD),
	MBOX_INC_SEL_EXTRA_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};

/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;

static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
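	/*
	 * Illustrative note (not in the original source): er->ref packs four
	 * 8-bit refcounts, one per FVC field, e.g. ref == 0x00010200 means
	 * field 1 has two users and field 2 has one; the
	 * atomic_add(1 << (idx * 8)) below bumps only the count of the field
	 * being claimed.
	 */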
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}

static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}

static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}

static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() -- we
	 * shouldn't touch event state, and we can avoid doing so since
	 * both will only call get_event_constraints() once per event;
	 * this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * Events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * remaining 3 choices.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}

static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}

static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require 2 extra MSRs at the most. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass two MSRs' config.
	 */
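	/*
	 * Illustrative layout (not in the original source): the first
	 * matching extra register lands in __BITS_VALUE(config1, 0, 32)
	 * (bits 0-31) and a PLD register always lands in bits 32-63,
	 * mirroring the "dsp"/"pld" format attributes declared below.
	 */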
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use bits 32~63 to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}

static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}

static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

DEFINE_UNCORE_FORMAT_ATTR(count_mode,		count_mode,	"config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode,		storage_mode,	"config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,		wrap_mode,	"config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode,		flag_mode,	"config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel,		inc_sel,	"config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,		set_flag_sel,	"config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,	filter_cfg_en,	"config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match,		filter_match,	"config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask,		filter_mask,	"config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp,			dsp,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr,			thr,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc,			fvc,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt,			pgt,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map,			map,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss,			iss,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld,			pld,		"config1:32-63");

static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};

static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift bits 8~15 down to bits 0~7 */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift bits 0~7 up to bits 8~15 */
		reg1->config <<= 8;
		break;
	}
}

/*
 * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in
 * an event set use the same extra register, so an event set uses
 * 5 extra registers.
 */
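/*
 * Worked example (illustrative, not in the original source): for
 * reg1->idx == 9 (event set 1, event 3), er_idx below becomes
 * (3 - 1) + 1 * 5 == 7, the same shared register that event 2 of
 * set 1 maps to.
 */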
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * These two events use different fields in an extra register,
		 * bits 0~7 and bits 8~15 respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
				!((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
				(er->config == (hwc->config >> 32) &&
				 er->config1 == reg1->config &&
				 er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}

static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}

static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}

static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}

DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");

static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,		"event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,		"event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,		"event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,		"event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_date_response,	"event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_date_response,	"event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};

static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};

void nhmex_uncore_cpu_init(void)
{
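	/* Added note (not in the original source): CPU model 46 is Nehalem-EX; Westmere-EX reports model 47. */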
	if (boot_cpu_data.x86_model == 46)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = nhmex_msr_uncores;
}
/* end of Nehalem-EX uncore support */