• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: MIT
2 
3 /*
4  * Copyright © 2019 Intel Corporation
5  */
6 
7 #include <linux/seq_file.h>
8 
9 #include "debugfs_gt.h"
10 #include "debugfs_gt_pm.h"
11 #include "i915_drv.h"
12 #include "intel_gt.h"
13 #include "intel_gt_clock_utils.h"
14 #include "intel_llc.h"
15 #include "intel_rc6.h"
16 #include "intel_rps.h"
17 #include "intel_runtime_pm.h"
18 #include "intel_sideband.h"
19 #include "intel_uncore.h"
20 
fw_domains_show(struct seq_file * m,void * data)21 static int fw_domains_show(struct seq_file *m, void *data)
22 {
23 	struct intel_gt *gt = m->private;
24 	struct intel_uncore *uncore = gt->uncore;
25 	struct intel_uncore_forcewake_domain *fw_domain;
26 	unsigned int tmp;
27 
28 	seq_printf(m, "user.bypass_count = %u\n",
29 		   uncore->user_forcewake_count);
30 
31 	for_each_fw_domain(fw_domain, uncore, tmp)
32 		seq_printf(m, "%s.wake_count = %u\n",
33 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
34 			   READ_ONCE(fw_domain->wake_count));
35 
36 	return 0;
37 }
38 DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
39 
print_rc6_res(struct seq_file * m,const char * title,const i915_reg_t reg)40 static void print_rc6_res(struct seq_file *m,
41 			  const char *title,
42 			  const i915_reg_t reg)
43 {
44 	struct intel_gt *gt = m->private;
45 	intel_wakeref_t wakeref;
46 
47 	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
48 		seq_printf(m, "%s %u (%llu us)\n", title,
49 			   intel_uncore_read(gt->uncore, reg),
50 			   intel_rc6_residency_us(&gt->rc6, reg));
51 }
52 
vlv_drpc(struct seq_file * m)53 static int vlv_drpc(struct seq_file *m)
54 {
55 	struct intel_gt *gt = m->private;
56 	struct intel_uncore *uncore = gt->uncore;
57 	u32 rcctl1, pw_status;
58 
59 	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
60 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
61 
62 	seq_printf(m, "RC6 Enabled: %s\n",
63 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
64 					GEN6_RC_CTL_EI_MODE(1))));
65 	seq_printf(m, "Render Power Well: %s\n",
66 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
67 	seq_printf(m, "Media Power Well: %s\n",
68 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
69 
70 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
71 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
72 
73 	return fw_domains_show(m, NULL);
74 }
75 
/*
 * Gen6+ variant of the drpc dump: which RC levels are enabled in
 * GEN6_RC_CONTROL, the current RC state from GEN6_GT_CORE_STATUS,
 * gen9+ power-well gating, RC6 residency counters and, on gen6/7,
 * the RC6 voltage IDs fetched from pcode. Ends with the shared
 * forcewake domain dump.
 */
static int gen6_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);

	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	if (INTEL_GEN(i915) >= 9) {
		/* gen9+ exposes per-well (render/media) powergating state. */
		gen9_powergate_enable =
			intel_uncore_read(uncore, GEN9_PG_ENABLE);
		gen9_powergate_status =
			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs are only available via pcode on gen6/7. */
	if (INTEL_GEN(i915) <= 7)
		sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(i915) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RCn field of the core status snapshot taken above. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(i915) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(i915) <= 7) {
		/* rc6vids packs three 8bit VIDs: RC6, RC6+, RC6++. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return fw_domains_show(m, NULL);
}
163 
ilk_drpc(struct seq_file * m)164 static int ilk_drpc(struct seq_file *m)
165 {
166 	struct intel_gt *gt = m->private;
167 	struct intel_uncore *uncore = gt->uncore;
168 	u32 rgvmodectl, rstdbyctl;
169 	u16 crstandvid;
170 
171 	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
172 	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
173 	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
174 
175 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
176 	seq_printf(m, "Boost freq: %d\n",
177 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
178 		   MEMMODE_BOOST_FREQ_SHIFT);
179 	seq_printf(m, "HW control enabled: %s\n",
180 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
181 	seq_printf(m, "SW control enabled: %s\n",
182 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
183 	seq_printf(m, "Gated voltage change: %s\n",
184 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
185 	seq_printf(m, "Starting frequency: P%d\n",
186 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
187 	seq_printf(m, "Max P-state: P%d\n",
188 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
189 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
190 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
191 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
192 	seq_printf(m, "Render standby enabled: %s\n",
193 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
194 	seq_puts(m, "Current RS state: ");
195 	switch (rstdbyctl & RSX_STATUS_MASK) {
196 	case RSX_STATUS_ON:
197 		seq_puts(m, "on\n");
198 		break;
199 	case RSX_STATUS_RC1:
200 		seq_puts(m, "RC1\n");
201 		break;
202 	case RSX_STATUS_RC1E:
203 		seq_puts(m, "RC1E\n");
204 		break;
205 	case RSX_STATUS_RS1:
206 		seq_puts(m, "RS1\n");
207 		break;
208 	case RSX_STATUS_RS2:
209 		seq_puts(m, "RS2 (RC6)\n");
210 		break;
211 	case RSX_STATUS_RS3:
212 		seq_puts(m, "RC3 (RC6+)\n");
213 		break;
214 	default:
215 		seq_puts(m, "unknown\n");
216 		break;
217 	}
218 
219 	return 0;
220 }
221 
drpc_show(struct seq_file * m,void * unused)222 static int drpc_show(struct seq_file *m, void *unused)
223 {
224 	struct intel_gt *gt = m->private;
225 	struct drm_i915_private *i915 = gt->i915;
226 	intel_wakeref_t wakeref;
227 	int err = -ENODEV;
228 
229 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
230 		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
231 			err = vlv_drpc(m);
232 		else if (INTEL_GEN(i915) >= 6)
233 			err = gen6_drpc(m);
234 		else
235 			err = ilk_drpc(m);
236 	}
237 
238 	return err;
239 }
240 DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
241 
/*
 * Dump the GPU frequency (RPS) state for the "frequency" debugfs file.
 * Three platform paths: Ironlake (MEMSWCTL/MEMSTAT), Valleyview/
 * Cherryview (punit), and gen6+ (the full RP autotuning register set).
 * The whole dump runs under a runtime-pm reference; the gen6+ register
 * snapshot is additionally bracketed by a forcewake grab since RPSTAT1
 * and friends live in the GT power well.
 */
static int frequency_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps *rps = &gt->rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	if (IS_GEN(i915, 5)) {
		/* Ironlake: requested/current P-state and VID from MEMSTAT. */
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		/* VLV/CHV: actual frequency comes from the punit sideband. */
		u32 rpmodectl, freq_sts;

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(i915);
		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(i915) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpcurupei, rpcurup, rpprevup;
		u32 rpcurdownei, rpcurdown, rpprevdown;
		u32 rpupei, rpupt, rpdownei, rpdownt;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts keep the caps in different registers. */
		if (IS_GEN9_LP(i915)) {
			rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
			gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
			gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

		/* The requested-frequency field moved/shrank across gens. */
		reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
		if (INTEL_GEN(i915) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(i915) || IS_BROADWELL(i915))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
		rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		/* Snapshot the RP autotuning counters while forcewake is held. */
		rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
		rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

		rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
		rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);

		rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
		rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		/* The PM interrupt registers moved across generations. */
		if (INTEL_GEN(i915) >= 11) {
			pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(i915) >= 8) {
			pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
			pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
			pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
			pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
		} else {
			pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
			pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
			pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
			pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
		}
		pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		/* ISR/IIR were not sampled on gen11+, see above. */
		if (INTEL_GEN(i915) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
			   rpcurupei,
			   intel_gt_pm_interval_to_ns(gt, rpcurupei));
		seq_printf(m, "RP CUR UP: %d (%dns)\n",
			   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dns)\n",
			   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);
		seq_printf(m, "RP UP EI: %d (%dns)\n",
			   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
		seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
			   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));

		seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
			   rpcurdownei,
			   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
			   rpcurdown,
			   intel_gt_pm_interval_to_ns(gt, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
			   rpprevdown,
			   intel_gt_pm_interval_to_ns(gt, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);
		seq_printf(m, "RP DOWN EI: %d (%dns)\n",
			   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
		seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
			   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));

		/*
		 * Decode RPN/RP1/RP0 from rp_state_cap; gen9-LP swaps the
		 * RPN/RP0 byte positions relative to the other platforms,
		 * and gen9-BC/gen10+ store frequencies in scaled units.
		 */
		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(i915) ||
			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(i915) ||
			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(i915) ||
			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);

	intel_runtime_pm_put(uncore->rpm, wakeref);

	return 0;
}
DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
485 
llc_show(struct seq_file * m,void * data)486 static int llc_show(struct seq_file *m, void *data)
487 {
488 	struct intel_gt *gt = m->private;
489 	struct drm_i915_private *i915 = gt->i915;
490 	const bool edram = INTEL_GEN(i915) > 8;
491 	struct intel_rps *rps = &gt->rps;
492 	unsigned int max_gpu_freq, min_gpu_freq;
493 	intel_wakeref_t wakeref;
494 	int gpu_freq, ia_freq;
495 
496 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
497 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
498 		   i915->edram_size_mb);
499 
500 	min_gpu_freq = rps->min_freq;
501 	max_gpu_freq = rps->max_freq;
502 	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
503 		/* Convert GT frequency to 50 HZ units */
504 		min_gpu_freq /= GEN9_FREQ_SCALER;
505 		max_gpu_freq /= GEN9_FREQ_SCALER;
506 	}
507 
508 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
509 
510 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
511 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
512 		ia_freq = gpu_freq;
513 		sandybridge_pcode_read(i915,
514 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
515 				       &ia_freq, NULL);
516 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
517 			   intel_gpu_freq(rps,
518 					  (gpu_freq *
519 					   (IS_GEN9_BC(i915) ||
520 					    INTEL_GEN(i915) >= 10 ?
521 					    GEN9_FREQ_SCALER : 1))),
522 			   ((ia_freq >> 0) & 0xff) * 100,
523 			   ((ia_freq >> 8) & 0xff) * 100);
524 	}
525 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
526 
527 	return 0;
528 }
529 
llc_eval(void * data)530 static bool llc_eval(void *data)
531 {
532 	struct intel_gt *gt = data;
533 
534 	return HAS_LLC(gt->i915);
535 }
536 
537 DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
538 
rps_power_to_str(unsigned int power)539 static const char *rps_power_to_str(unsigned int power)
540 {
541 	static const char * const strings[] = {
542 		[LOW_POWER] = "low power",
543 		[BETWEEN] = "mixed",
544 		[HIGH_POWER] = "high power",
545 	};
546 
547 	if (power >= ARRAY_SIZE(strings) || !strings[power])
548 		return "unknown";
549 
550 	return strings[power];
551 }
552 
/*
 * Dump the RPS boost state for the "rps_boost" debugfs file: whether RPS
 * is enabled/active, outstanding waitboosts, the current frequency limits,
 * and (while RPS is active on gen6+) the live autotuning up/down counters.
 */
static int rps_boost_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &gt->rps;

	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
		struct intel_uncore *uncore = gt->uncore;
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Snapshot the EI counters together under forcewake. */
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard against div-by-zero if a counter read back as 0. */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
606 
rps_eval(void * data)607 static bool rps_eval(void *data)
608 {
609 	struct intel_gt *gt = data;
610 
611 	return HAS_RPS(gt->i915);
612 }
613 
614 DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
615 
debugfs_gt_pm_register(struct intel_gt * gt,struct dentry * root)616 void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
617 {
618 	static const struct debugfs_gt_file files[] = {
619 		{ "drpc", &drpc_fops, NULL },
620 		{ "frequency", &frequency_fops, NULL },
621 		{ "forcewake", &fw_domains_fops, NULL },
622 		{ "llc", &llc_fops, llc_eval },
623 		{ "rps_boost", &rps_boost_fops, rps_eval },
624 	};
625 
626 	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
627 }
628