1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2019 Intel Corporation
5 */
6
7 #include <drm/i915_drm.h>
8
9 #include "i915_drv.h"
10 #include "intel_breadcrumbs.h"
11 #include "intel_gt.h"
12 #include "intel_gt_clock_utils.h"
13 #include "intel_gt_irq.h"
14 #include "intel_gt_pm_irq.h"
15 #include "intel_rps.h"
16 #include "intel_sideband.h"
17 #include "../../../platform/x86/intel_ips.h"
18
19 #define BUSY_MAX_EI 20u /* ms */
20
21 /*
22 * Lock protecting IPS related data structures
23 */
24 static DEFINE_SPINLOCK(mchdev_lock);
25
26 static struct intel_gt *rps_to_gt(struct intel_rps *rps)
27 {
28 return container_of(rps, struct intel_gt, rps);
29 }
30
31 static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
32 {
33 return rps_to_gt(rps)->i915;
34 }
35
36 static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
37 {
38 return rps_to_gt(rps)->uncore;
39 }
40
41 static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
42 {
43 return mask & ~rps->pm_intrmsk_mbz;
44 }
45
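/*
 * Raw register write helper: intel_uncore_write_fw() bypasses the
 * forcewake bookkeeping, so callers are expected to already hold any
 * forcewake required for the register being written.
 */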
46 static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
47 {
48 intel_uncore_write_fw(uncore, reg, val);
49 }
50
51 static void rps_timer(struct timer_list *t)
52 {
53 struct intel_rps *rps = from_timer(rps, t, timer);
54 struct intel_engine_cs *engine;
55 ktime_t dt, last, timestamp;
56 enum intel_engine_id id;
57 s64 max_busy[3] = {};
58
59 timestamp = 0;
60 for_each_engine(engine, rps_to_gt(rps), id) {
61 s64 busy;
62 int i;
63
64 dt = intel_engine_get_busy_time(engine, &timestamp);
65 last = engine->stats.rps;
66 engine->stats.rps = dt;
67
68 busy = ktime_to_ns(ktime_sub(dt, last));
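/*
 * Keep the three largest per-engine busy deltas in max_busy[],
 * in descending order, via a simple insertion swap.
 */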
69 for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
70 if (busy > max_busy[i])
71 swap(busy, max_busy[i]);
72 }
73 }
74 last = rps->pm_timestamp;
75 rps->pm_timestamp = timestamp;
76
77 if (intel_rps_is_active(rps)) {
78 s64 busy;
79 int i;
80
81 dt = ktime_sub(timestamp, last);
82
83 /*
84 * Our goal is to evaluate each engine independently, so we run
85 * at the lowest clocks required to sustain the heaviest
86 * workload. However, a task may be split into sequential
87 * dependent operations across a set of engines, such that
88 * the independent contributions do not account for high load,
89 * but overall the task is GPU bound. For example, consider
90 * video decode on vcs followed by colour post-processing
91 * on vecs, followed by general post-processing on rcs.
92 * Since multiple engines being active does not necessarily imply
93 * a single continuous workload across all engines, we hedge our
94 * bets by only contributing a factor of the distributed
95 * load into our busyness calculation.
96 */
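/*
 * Weight the busiest engine fully, and the next two busiest at
 * 1/2 and 1/4 respectively (the 1 << i divisor below).
 */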
97 busy = max_busy[0];
98 for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
99 if (!max_busy[i])
100 break;
101
102 busy += div_u64(max_busy[i], 1 << i);
103 }
104 GT_TRACE(rps_to_gt(rps),
105 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
106 busy, (int)div64_u64(100 * busy, dt),
107 max_busy[0], max_busy[1], max_busy[2],
108 rps->pm_interval);
109
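/*
 * Compare busyness against the up/down thresholds (percentages)
 * without dividing: 100 * busy vs threshold * dt.
 */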
110 if (100 * busy > rps->power.up_threshold * dt &&
111 rps->cur_freq < rps->max_freq_softlimit) {
112 rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
113 rps->pm_interval = 1;
114 schedule_work(&rps->work);
115 } else if (100 * busy < rps->power.down_threshold * dt &&
116 rps->cur_freq > rps->min_freq_softlimit) {
117 rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
118 rps->pm_interval = 1;
119 schedule_work(&rps->work);
120 } else {
121 rps->last_adj = 0;
122 }
123
124 mod_timer(&rps->timer,
125 jiffies + msecs_to_jiffies(rps->pm_interval));
126 rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
127 }
128 }
129
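/*
 * pm_timestamp is toggled between an absolute time and an offset on
 * park/unpark so that the time spent parked is excluded from the next
 * evaluation interval measured by rps_timer().
 */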
130 static void rps_start_timer(struct intel_rps *rps)
131 {
132 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
133 rps->pm_interval = 1;
134 mod_timer(&rps->timer, jiffies + 1);
135 }
136
137 static void rps_stop_timer(struct intel_rps *rps)
138 {
139 del_timer_sync(&rps->timer);
140 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
141 cancel_work_sync(&rps->work);
142 }
143
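/*
 * Build the GEN6_PMINTRMSK value for a given frequency: a set bit in
 * the returned mask disables that PM interrupt, so only the events
 * that can still move us within the softlimits are left unmasked.
 */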
144 static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
145 {
146 u32 mask = 0;
147
148 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
149 if (val > rps->min_freq_softlimit)
150 mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
151 GEN6_PM_RP_DOWN_THRESHOLD |
152 GEN6_PM_RP_DOWN_TIMEOUT);
153
154 if (val < rps->max_freq_softlimit)
155 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
156
157 mask &= rps->pm_events;
158
159 return rps_pm_sanitize_mask(rps, ~mask);
160 }
161
162 static void rps_reset_ei(struct intel_rps *rps)
163 {
164 memset(&rps->ei, 0, sizeof(rps->ei));
165 }
166
167 static void rps_enable_interrupts(struct intel_rps *rps)
168 {
169 struct intel_gt *gt = rps_to_gt(rps);
170
171 GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
172 rps->pm_events, rps_pm_mask(rps, rps->last_freq));
173
174 rps_reset_ei(rps);
175
176 spin_lock_irq(&gt->irq_lock);
177 gen6_gt_pm_enable_irq(gt, rps->pm_events);
178 spin_unlock_irq(&gt->irq_lock);
179
180 intel_uncore_write(gt->uncore,
181 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
182 }
183
184 static void gen6_rps_reset_interrupts(struct intel_rps *rps)
185 {
186 gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
187 }
188
189 static void gen11_rps_reset_interrupts(struct intel_rps *rps)
190 {
191 while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
192 ;
193 }
194
195 static void rps_reset_interrupts(struct intel_rps *rps)
196 {
197 struct intel_gt *gt = rps_to_gt(rps);
198
199 spin_lock_irq(&gt->irq_lock);
200 if (INTEL_GEN(gt->i915) >= 11)
201 gen11_rps_reset_interrupts(rps);
202 else
203 gen6_rps_reset_interrupts(rps);
204
205 rps->pm_iir = 0;
206 spin_unlock_irq(&gt->irq_lock);
207 }
208
209 static void rps_disable_interrupts(struct intel_rps *rps)
210 {
211 struct intel_gt *gt = rps_to_gt(rps);
212
213 intel_uncore_write(gt->uncore,
214 GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
215
216 spin_lock_irq(&gt->irq_lock);
217 gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
218 spin_unlock_irq(&gt->irq_lock);
219
220 intel_synchronize_irq(gt->i915);
221
222 /*
223 * Now that we will not be generating any more work, flush any
224 * outstanding tasks. As we are called on the RPS idle path,
225 * we will reset the GPU to minimum frequencies, so the current
226 * state of the worker can be discarded.
227 */
228 cancel_work_sync(&rps->work);
229
230 rps_reset_interrupts(rps);
231 GT_TRACE(gt, "interrupts:off\n");
232 }
233
234 static const struct cparams {
235 u16 i;
236 u16 t;
237 u16 m;
238 u16 c;
239 } cparams[] = {
240 { 1, 1333, 301, 28664 },
241 { 1, 1066, 294, 24460 },
242 { 1, 800, 294, 25192 },
243 { 0, 1333, 276, 27605 },
244 { 0, 1066, 276, 27605 },
245 { 0, 800, 231, 23784 },
246 };
247
248 static void gen5_rps_init(struct intel_rps *rps)
249 {
250 struct drm_i915_private *i915 = rps_to_i915(rps);
251 struct intel_uncore *uncore = rps_to_uncore(rps);
252 u8 fmax, fmin, fstart;
253 u32 rgvmodectl;
254 int c_m, i;
255
256 if (i915->fsb_freq <= 3200)
257 c_m = 0;
258 else if (i915->fsb_freq <= 4800)
259 c_m = 1;
260 else
261 c_m = 2;
262
263 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
264 if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
265 rps->ips.m = cparams[i].m;
266 rps->ips.c = cparams[i].c;
267 break;
268 }
269 }
270
271 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
272
273 /* Set up min, max, and cur for interrupt handling */
274 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
275 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
276 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
277 MEMMODE_FSTART_SHIFT;
278 drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
279 fmax, fmin, fstart);
280
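/*
 * Ironlake frequency bins are inverted (a lower bin value means a
 * higher frequency), so fmax maps to min_freq and fmin to max_freq;
 * gen5_rps_set() inverts the bin again before writing it out.
 */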
281 rps->min_freq = fmax;
282 rps->efficient_freq = fstart;
283 rps->max_freq = fmin;
284 }
285
286 static unsigned long
287 __ips_chipset_val(struct intel_ips *ips)
288 {
289 struct intel_uncore *uncore =
290 rps_to_uncore(container_of(ips, struct intel_rps, ips));
291 unsigned long now = jiffies_to_msecs(jiffies), dt;
292 unsigned long result;
293 u64 total, delta;
294
295 lockdep_assert_held(&mchdev_lock);
296
297 /*
298 * Prevent division-by-zero if we are asking too fast.
299 * Also, we don't get interesting results if we are polling
300 * faster than once in 10ms, so just return the saved value
301 * in such cases.
302 */
303 dt = now - ips->last_time1;
304 if (dt <= 10)
305 return ips->chipset_power;
306
307 /* FIXME: handle per-counter overflow */
308 total = intel_uncore_read(uncore, DMIEC);
309 total += intel_uncore_read(uncore, DDREC);
310 total += intel_uncore_read(uncore, CSIEC);
311
312 delta = total - ips->last_count1;
313
314 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
315
316 ips->last_count1 = total;
317 ips->last_time1 = now;
318
319 ips->chipset_power = result;
320
321 return result;
322 }
323
324 static unsigned long ips_mch_val(struct intel_uncore *uncore)
325 {
326 unsigned int m, x, b;
327 u32 tsfs;
328
329 tsfs = intel_uncore_read(uncore, TSFS);
330 x = intel_uncore_read8(uncore, TR1);
331
332 b = tsfs & TSFS_INTR_MASK;
333 m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
334
335 return m * x / 127 - b;
336 }
337
338 static int _pxvid_to_vd(u8 pxvid)
339 {
340 if (pxvid == 0)
341 return 0;
342
343 if (pxvid >= 8 && pxvid < 31)
344 pxvid = 31;
345
346 return (pxvid + 2) * 125;
347 }
348
349 static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
350 {
351 const int vd = _pxvid_to_vd(pxvid);
352
353 if (INTEL_INFO(i915)->is_mobile)
354 return max(vd - 1125, 0);
355
356 return vd;
357 }
358
359 static void __gen5_ips_update(struct intel_ips *ips)
360 {
361 struct intel_uncore *uncore =
362 rps_to_uncore(container_of(ips, struct intel_rps, ips));
363 u64 now, delta, dt;
364 u32 count;
365
366 lockdep_assert_held(&mchdev_lock);
367
368 now = ktime_get_raw_ns();
369 dt = now - ips->last_time2;
370 do_div(dt, NSEC_PER_MSEC);
371
372 /* Don't divide by 0 */
373 if (dt <= 10)
374 return;
375
376 count = intel_uncore_read(uncore, GFXEC);
377 delta = count - ips->last_count2;
378
379 ips->last_count2 = count;
380 ips->last_time2 = now;
381
382 /* More magic constants... */
383 ips->gfx_power = div_u64(delta * 1181, dt * 10);
384 }
385
386 static void gen5_rps_update(struct intel_rps *rps)
387 {
388 spin_lock_irq(&mchdev_lock);
389 __gen5_ips_update(&rps->ips);
390 spin_unlock_irq(&mchdev_lock);
391 }
392
393 static bool gen5_rps_set(struct intel_rps *rps, u8 val)
394 {
395 struct intel_uncore *uncore = rps_to_uncore(rps);
396 u16 rgvswctl;
397
398 lockdep_assert_held(&mchdev_lock);
399
400 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
401 if (rgvswctl & MEMCTL_CMD_STS) {
402 DRM_DEBUG("gpu busy, RCS change rejected\n");
403 return false; /* still busy with another command */
404 }
405
406 /* Invert the frequency bin into an ips delay */
407 val = rps->max_freq - val;
408 val = rps->min_freq + val;
409
410 rgvswctl =
411 (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
412 (val << MEMCTL_FREQ_SHIFT) |
413 MEMCTL_SFCAVM;
414 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
415 intel_uncore_posting_read16(uncore, MEMSWCTL);
416
417 rgvswctl |= MEMCTL_CMD_STS;
418 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
419
420 return true;
421 }
422
423 static unsigned long intel_pxfreq(u32 vidfreq)
424 {
425 int div = (vidfreq & 0x3f0000) >> 16;
426 int post = (vidfreq & 0x3000) >> 12;
427 int pre = (vidfreq & 0x7);
428
429 if (!pre)
430 return 0;
431
432 return div * 133333 / (pre << post);
433 }
434
435 static unsigned int init_emon(struct intel_uncore *uncore)
436 {
437 u8 pxw[16];
438 int i;
439
440 /* Disable to program */
441 intel_uncore_write(uncore, ECR, 0);
442 intel_uncore_posting_read(uncore, ECR);
443
444 /* Program energy weights for various events */
445 intel_uncore_write(uncore, SDEW, 0x15040d00);
446 intel_uncore_write(uncore, CSIEW0, 0x007f0000);
447 intel_uncore_write(uncore, CSIEW1, 0x1e220004);
448 intel_uncore_write(uncore, CSIEW2, 0x04000004);
449
450 for (i = 0; i < 5; i++)
451 intel_uncore_write(uncore, PEW(i), 0);
452 for (i = 0; i < 3; i++)
453 intel_uncore_write(uncore, DEW(i), 0);
454
455 /* Program P-state weights to account for frequency power adjustment */
456 for (i = 0; i < 16; i++) {
457 u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
458 unsigned int freq = intel_pxfreq(pxvidfreq);
459 unsigned int vid =
460 (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
461 unsigned int val;
462
463 val = vid * vid * freq / 1000 * 255;
464 val /= 127 * 127 * 900;
465
466 pxw[i] = val;
467 }
468 /* Render standby states get 0 weight */
469 pxw[14] = 0;
470 pxw[15] = 0;
471
472 for (i = 0; i < 4; i++) {
473 intel_uncore_write(uncore, PXW(i),
474 pxw[i * 4 + 0] << 24 |
475 pxw[i * 4 + 1] << 16 |
476 pxw[i * 4 + 2] << 8 |
477 pxw[i * 4 + 3] << 0);
478 }
479
480 /* Adjust magic regs to magic values (more experimental results) */
481 intel_uncore_write(uncore, OGW0, 0);
482 intel_uncore_write(uncore, OGW1, 0);
483 intel_uncore_write(uncore, EG0, 0x00007f00);
484 intel_uncore_write(uncore, EG1, 0x0000000e);
485 intel_uncore_write(uncore, EG2, 0x000e0000);
486 intel_uncore_write(uncore, EG3, 0x68000300);
487 intel_uncore_write(uncore, EG4, 0x42000000);
488 intel_uncore_write(uncore, EG5, 0x00140031);
489 intel_uncore_write(uncore, EG6, 0);
490 intel_uncore_write(uncore, EG7, 0);
491
492 for (i = 0; i < 8; i++)
493 intel_uncore_write(uncore, PXWL(i), 0);
494
495 /* Enable PMON + select events */
496 intel_uncore_write(uncore, ECR, 0x80000019);
497
498 return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
499 }
500
501 static bool gen5_rps_enable(struct intel_rps *rps)
502 {
503 struct intel_uncore *uncore = rps_to_uncore(rps);
504 u8 fstart, vstart;
505 u32 rgvmodectl;
506
507 spin_lock_irq(&mchdev_lock);
508
509 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
510
511 /* Enable temp reporting */
512 intel_uncore_write16(uncore, PMMISC,
513 intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
514 intel_uncore_write16(uncore, TSC1,
515 intel_uncore_read16(uncore, TSC1) | TSE);
516
517 /* 100ms RC evaluation intervals */
518 intel_uncore_write(uncore, RCUPEI, 100000);
519 intel_uncore_write(uncore, RCDNEI, 100000);
520
521 /* Set max/min thresholds to 90ms and 80ms respectively */
522 intel_uncore_write(uncore, RCBMAXAVG, 90000);
523 intel_uncore_write(uncore, RCBMINAVG, 80000);
524
525 intel_uncore_write(uncore, MEMIHYST, 1);
526
527 /* Set up min, max, and cur for interrupt handling */
528 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
529 MEMMODE_FSTART_SHIFT;
530
531 vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
532 PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
533
534 intel_uncore_write(uncore,
535 MEMINTREN,
536 MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
537
538 intel_uncore_write(uncore, VIDSTART, vstart);
539 intel_uncore_posting_read(uncore, VIDSTART);
540
541 rgvmodectl |= MEMMODE_SWMODE_EN;
542 intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
543
544 if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
545 MEMCTL_CMD_STS) == 0, 10))
546 drm_err(&uncore->i915->drm,
547 "stuck trying to change perf mode\n");
548 mdelay(1);
549
550 gen5_rps_set(rps, rps->cur_freq);
551
552 rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
553 rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
554 rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
555 rps->ips.last_time1 = jiffies_to_msecs(jiffies);
556
557 rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
558 rps->ips.last_time2 = ktime_get_raw_ns();
559
560 spin_unlock_irq(&mchdev_lock);
561
562 rps->ips.corr = init_emon(uncore);
563
564 return true;
565 }
566
567 static void gen5_rps_disable(struct intel_rps *rps)
568 {
569 struct intel_uncore *uncore = rps_to_uncore(rps);
570 u16 rgvswctl;
571
572 spin_lock_irq(&mchdev_lock);
573
574 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
575
576 /* Ack interrupts, disable EFC interrupt */
577 intel_uncore_write(uncore, MEMINTREN,
578 intel_uncore_read(uncore, MEMINTREN) &
579 ~MEMINT_EVAL_CHG_EN);
580 intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
581 intel_uncore_write(uncore, DEIER,
582 intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
583 intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
584 intel_uncore_write(uncore, DEIMR,
585 intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
586
587 /* Go back to the starting frequency */
588 gen5_rps_set(rps, rps->idle_freq);
589 mdelay(1);
590 rgvswctl |= MEMCTL_CMD_STS;
591 intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
592 mdelay(1);
593
594 spin_unlock_irq(&mchdev_lock);
595 }
596
597 static u32 rps_limits(struct intel_rps *rps, u8 val)
598 {
599 u32 limits;
600
601 /*
602 * Only set the down limit when we've reached the lowest level to avoid
603 * getting more interrupts, otherwise leave this clear. This prevents a
604 * race in the hw when coming out of rc6: There's a tiny window where
605 * the hw runs at the minimal clock before selecting the desired
606 * frequency, if the down threshold expires in that window we will not
607 * receive a down interrupt.
608 */
609 if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
610 limits = rps->max_freq_softlimit << 23;
611 if (val <= rps->min_freq_softlimit)
612 limits |= rps->min_freq_softlimit << 14;
613 } else {
614 limits = rps->max_freq_softlimit << 24;
615 if (val <= rps->min_freq_softlimit)
616 limits |= rps->min_freq_softlimit << 16;
617 }
618
619 return limits;
620 }
621
622 static void rps_set_power(struct intel_rps *rps, int new_power)
623 {
624 struct intel_gt *gt = rps_to_gt(rps);
625 struct intel_uncore *uncore = gt->uncore;
626 u32 threshold_up = 0, threshold_down = 0; /* in % */
627 u32 ei_up = 0, ei_down = 0;
628
629 lockdep_assert_held(&rps->power.mutex);
630
631 if (new_power == rps->power.mode)
632 return;
633
634 threshold_up = 95;
635 threshold_down = 85;
636
637 /* Note the units here are not exactly 1us, but 1280ns. */
638 switch (new_power) {
639 case LOW_POWER:
640 ei_up = 16000;
641 ei_down = 32000;
642 break;
643
644 case BETWEEN:
645 ei_up = 13000;
646 ei_down = 32000;
647 break;
648
649 case HIGH_POWER:
650 ei_up = 10000;
651 ei_down = 32000;
652 break;
653 }
654
655 /* When byt can survive dynamic sw freq adjustments without
656 * hanging the system, this restriction can be lifted.
657 */
658 if (IS_VALLEYVIEW(gt->i915))
659 goto skip_hw_write;
660
661 GT_TRACE(gt,
662 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
663 new_power, threshold_up, ei_up, threshold_down, ei_down);
664
665 set(uncore, GEN6_RP_UP_EI,
666 intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
667 set(uncore, GEN6_RP_UP_THRESHOLD,
668 intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
669
670 set(uncore, GEN6_RP_DOWN_EI,
671 intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
672 set(uncore, GEN6_RP_DOWN_THRESHOLD,
673 intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
674
675 set(uncore, GEN6_RP_CONTROL,
676 (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
677 GEN6_RP_MEDIA_HW_NORMAL_MODE |
678 GEN6_RP_MEDIA_IS_GFX |
679 GEN6_RP_ENABLE |
680 GEN6_RP_UP_BUSY_AVG |
681 GEN6_RP_DOWN_IDLE_AVG);
682
683 skip_hw_write:
684 rps->power.mode = new_power;
685 rps->power.up_threshold = threshold_up;
686 rps->power.down_threshold = threshold_down;
687 }
688
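/*
 * Choose the power band (LOW_POWER/BETWEEN/HIGH_POWER) for the
 * requested frequency, only moving bands when the request also crosses
 * the current frequency in the same direction, to add some hysteresis.
 */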
689 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
690 {
691 int new_power;
692
693 new_power = rps->power.mode;
694 switch (rps->power.mode) {
695 case LOW_POWER:
696 if (val > rps->efficient_freq + 1 &&
697 val > rps->cur_freq)
698 new_power = BETWEEN;
699 break;
700
701 case BETWEEN:
702 if (val <= rps->efficient_freq &&
703 val < rps->cur_freq)
704 new_power = LOW_POWER;
705 else if (val >= rps->rp0_freq &&
706 val > rps->cur_freq)
707 new_power = HIGH_POWER;
708 break;
709
710 case HIGH_POWER:
711 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
712 val < rps->cur_freq)
713 new_power = BETWEEN;
714 break;
715 }
716 /* Max/min bins are special */
717 if (val <= rps->min_freq_softlimit)
718 new_power = LOW_POWER;
719 if (val >= rps->max_freq_softlimit)
720 new_power = HIGH_POWER;
721
722 mutex_lock(&rps->power.mutex);
723 if (rps->power.interactive)
724 new_power = HIGH_POWER;
725 rps_set_power(rps, new_power);
726 mutex_unlock(&rps->power.mutex);
727 }
728
729 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
730 {
731 GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
732
733 mutex_lock(&rps->power.mutex);
734 if (interactive) {
735 if (!rps->power.interactive++ && intel_rps_is_active(rps))
736 rps_set_power(rps, HIGH_POWER);
737 } else {
738 GEM_BUG_ON(!rps->power.interactive);
739 rps->power.interactive--;
740 }
741 mutex_unlock(&rps->power.mutex);
742 }
743
744 static int gen6_rps_set(struct intel_rps *rps, u8 val)
745 {
746 struct intel_uncore *uncore = rps_to_uncore(rps);
747 struct drm_i915_private *i915 = rps_to_i915(rps);
748 u32 swreq;
749
750 if (INTEL_GEN(i915) >= 9)
751 swreq = GEN9_FREQUENCY(val);
752 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
753 swreq = HSW_FREQUENCY(val);
754 else
755 swreq = (GEN6_FREQUENCY(val) |
756 GEN6_OFFSET(0) |
757 GEN6_AGGRESSIVE_TURBO);
758 set(uncore, GEN6_RPNSWREQ, swreq);
759
760 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
761 val, intel_gpu_freq(rps, val), swreq);
762
763 return 0;
764 }
765
766 static int vlv_rps_set(struct intel_rps *rps, u8 val)
767 {
768 struct drm_i915_private *i915 = rps_to_i915(rps);
769 int err;
770
771 vlv_punit_get(i915);
772 err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
773 vlv_punit_put(i915);
774
775 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
776 val, intel_gpu_freq(rps, val));
777
778 return err;
779 }
780
781 static int rps_set(struct intel_rps *rps, u8 val, bool update)
782 {
783 struct drm_i915_private *i915 = rps_to_i915(rps);
784 int err;
785
786 if (INTEL_GEN(i915) < 6)
787 return 0;
788
789 if (val == rps->last_freq)
790 return 0;
791
792 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
793 err = vlv_rps_set(rps, val);
794 else
795 err = gen6_rps_set(rps, val);
796 if (err)
797 return err;
798
799 if (update)
800 gen6_rps_set_thresholds(rps, val);
801 rps->last_freq = val;
802
803 return 0;
804 }
805
806 void intel_rps_unpark(struct intel_rps *rps)
807 {
808 if (!intel_rps_is_enabled(rps))
809 return;
810
811 GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
812
813 /*
814 * Use the user's desired frequency as a guide, but for better
815 * performance, jump directly to RPe as our starting frequency.
816 */
817 mutex_lock(&rps->lock);
818
819 intel_rps_set_active(rps);
820 intel_rps_set(rps,
821 clamp(rps->cur_freq,
822 rps->min_freq_softlimit,
823 rps->max_freq_softlimit));
824
825 mutex_unlock(&rps->lock);
826
827 rps->pm_iir = 0;
828 if (intel_rps_has_interrupts(rps))
829 rps_enable_interrupts(rps);
830 if (intel_rps_uses_timer(rps))
831 rps_start_timer(rps);
832
833 if (IS_GEN(rps_to_i915(rps), 5))
834 gen5_rps_update(rps);
835 }
836
837 void intel_rps_park(struct intel_rps *rps)
838 {
839 int adj;
840
841 if (!intel_rps_clear_active(rps))
842 return;
843
844 if (intel_rps_uses_timer(rps))
845 rps_stop_timer(rps);
846 if (intel_rps_has_interrupts(rps))
847 rps_disable_interrupts(rps);
848
849 if (rps->last_freq <= rps->idle_freq)
850 return;
851
852 /*
853 * The punit delays the write of the frequency and voltage until it
854 * determines the GPU is awake. During normal usage we don't want to
855 * waste power changing the frequency if the GPU is sleeping (rc6).
856 * However, the GPU and driver are now idle and we do not want to delay
857 * switching to minimum voltage (reducing power whilst idle) as we do
858 * not expect to be woken in the near future and so must flush the
859 * change by waking the device.
860 *
861 * We choose to take the media powerwell (either would do to trick the
862 * punit into committing the voltage change) as that takes a lot less
863 * power than the render powerwell.
864 */
865 intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
866 rps_set(rps, rps->idle_freq, false);
867 intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
868
869 /*
870 * Since we will try and restart from the previously requested
871 * frequency on unparking, treat this idle point as a downclock
872 * interrupt and reduce the frequency for resume. If we park/unpark
873 * more frequently than the rps worker can run, we will not respond
874 * to any EI and never see a change in frequency.
875 *
876 * (Note we accommodate Cherryview's limitation of only using an
877 * even bin by applying it to all.)
878 */
879 adj = rps->last_adj;
880 if (adj < 0)
881 adj *= 2;
882 else /* CHV needs even encode values */
883 adj = -2;
884 rps->last_adj = adj;
885 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
886 if (rps->cur_freq < rps->efficient_freq) {
887 rps->cur_freq = rps->efficient_freq;
888 rps->last_adj = 0;
889 }
890
891 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
892 }
893
894 void intel_rps_boost(struct i915_request *rq)
895 {
896 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
897 unsigned long flags;
898
899 if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
900 return;
901
902 /* Serializes with i915_request_retire() */
903 spin_lock_irqsave(&rq->lock, flags);
904 if (!i915_request_has_waitboost(rq) &&
905 !dma_fence_is_signaled_locked(&rq->fence)) {
906 set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
907
908 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
909 rq->fence.context, rq->fence.seqno);
910
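/*
 * Only the first waiter needs to kick the worker, and only if we
 * are not already running at the boost frequency.
 */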
911 if (!atomic_fetch_inc(&rps->num_waiters) &&
912 READ_ONCE(rps->cur_freq) < rps->boost_freq)
913 schedule_work(&rps->work);
914
915 atomic_inc(&rps->boosts);
916 }
917 spin_unlock_irqrestore(&rq->lock, flags);
918 }
919
920 int intel_rps_set(struct intel_rps *rps, u8 val)
921 {
922 int err;
923
924 lockdep_assert_held(&rps->lock);
925 GEM_BUG_ON(val > rps->max_freq);
926 GEM_BUG_ON(val < rps->min_freq);
927
928 if (intel_rps_is_active(rps)) {
929 err = rps_set(rps, val, true);
930 if (err)
931 return err;
932
933 /*
934 * Make sure we continue to get interrupts
935 * until we hit the minimum or maximum frequencies.
936 */
937 if (intel_rps_has_interrupts(rps)) {
938 struct intel_uncore *uncore = rps_to_uncore(rps);
939
940 set(uncore,
941 GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
942
943 set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
944 }
945 }
946
947 rps->cur_freq = val;
948 return 0;
949 }
950
951 static void gen6_rps_init(struct intel_rps *rps)
952 {
953 struct drm_i915_private *i915 = rps_to_i915(rps);
954 struct intel_uncore *uncore = rps_to_uncore(rps);
955
956 /* All of these values are in units of 50MHz */
957
958 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
959 if (IS_GEN9_LP(i915)) {
960 u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
961
962 rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
963 rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
964 rps->min_freq = (rp_state_cap >> 0) & 0xff;
965 } else {
966 u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
967
968 rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
969 rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
970 rps->min_freq = (rp_state_cap >> 16) & 0xff;
971 }
972
973 /* hw_max = RP0 until we check for overclocking */
974 rps->max_freq = rps->rp0_freq;
975
976 rps->efficient_freq = rps->rp1_freq;
977 if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
978 IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
979 u32 ddcc_status = 0;
980
981 if (sandybridge_pcode_read(i915,
982 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
983 &ddcc_status, NULL) == 0)
984 rps->efficient_freq =
985 clamp_t(u8,
986 (ddcc_status >> 8) & 0xff,
987 rps->min_freq,
988 rps->max_freq);
989 }
990
991 if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
992 /* Store the frequency values in 16.66 MHz units, which is
993 * the natural hardware unit for SKL
994 */
995 rps->rp0_freq *= GEN9_FREQ_SCALER;
996 rps->rp1_freq *= GEN9_FREQ_SCALER;
997 rps->min_freq *= GEN9_FREQ_SCALER;
998 rps->max_freq *= GEN9_FREQ_SCALER;
999 rps->efficient_freq *= GEN9_FREQ_SCALER;
1000 }
1001 }
1002
1003 static bool rps_reset(struct intel_rps *rps)
1004 {
1005 struct drm_i915_private *i915 = rps_to_i915(rps);
1006
1007 /* force a reset */
1008 rps->power.mode = -1;
1009 rps->last_freq = -1;
1010
1011 if (rps_set(rps, rps->min_freq, true)) {
1012 drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
1013 return false;
1014 }
1015
1016 rps->cur_freq = rps->min_freq;
1017 return true;
1018 }
1019
1020 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
1021 static bool gen9_rps_enable(struct intel_rps *rps)
1022 {
1023 struct intel_gt *gt = rps_to_gt(rps);
1024 struct intel_uncore *uncore = gt->uncore;
1025
1026 /* Program defaults and thresholds for RPS */
1027 if (IS_GEN(gt->i915, 9))
1028 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
1029 GEN9_FREQUENCY(rps->rp1_freq));
1030
1031 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
1032
1033 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
1034
1035 return rps_reset(rps);
1036 }
1037
1038 static bool gen8_rps_enable(struct intel_rps *rps)
1039 {
1040 struct intel_uncore *uncore = rps_to_uncore(rps);
1041
1042 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
1043 HSW_FREQUENCY(rps->rp1_freq));
1044
1045 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
1046
1047 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
1048
1049 return rps_reset(rps);
1050 }
1051
1052 static bool gen6_rps_enable(struct intel_rps *rps)
1053 {
1054 struct intel_uncore *uncore = rps_to_uncore(rps);
1055
1056 /* Power down if completely idle for over 50ms */
1057 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
1058 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
1059
1060 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
1061 GEN6_PM_RP_DOWN_THRESHOLD |
1062 GEN6_PM_RP_DOWN_TIMEOUT);
1063
1064 return rps_reset(rps);
1065 }
1066
1067 static int chv_rps_max_freq(struct intel_rps *rps)
1068 {
1069 struct drm_i915_private *i915 = rps_to_i915(rps);
1070 struct intel_gt *gt = rps_to_gt(rps);
1071 u32 val;
1072
1073 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
1074
1075 switch (gt->info.sseu.eu_total) {
1076 case 8:
1077 /* (2 * 4) config */
1078 val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
1079 break;
1080 case 12:
1081 /* (2 * 6) config */
1082 val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
1083 break;
1084 case 16:
1085 /* (2 * 8) config */
1086 default:
1087 /* Setting (2 * 8) Min RP0 for any other combination */
1088 val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
1089 break;
1090 }
1091
1092 return val & FB_GFX_FREQ_FUSE_MASK;
1093 }
1094
1095 static int chv_rps_rpe_freq(struct intel_rps *rps)
1096 {
1097 struct drm_i915_private *i915 = rps_to_i915(rps);
1098 u32 val;
1099
1100 val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
1101 val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
1102
1103 return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
1104 }
1105
1106 static int chv_rps_guar_freq(struct intel_rps *rps)
1107 {
1108 struct drm_i915_private *i915 = rps_to_i915(rps);
1109 u32 val;
1110
1111 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
1112
1113 return val & FB_GFX_FREQ_FUSE_MASK;
1114 }
1115
1116 static u32 chv_rps_min_freq(struct intel_rps *rps)
1117 {
1118 struct drm_i915_private *i915 = rps_to_i915(rps);
1119 u32 val;
1120
1121 val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
1122 val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
1123
1124 return val & FB_GFX_FREQ_FUSE_MASK;
1125 }
1126
1127 static bool chv_rps_enable(struct intel_rps *rps)
1128 {
1129 struct intel_uncore *uncore = rps_to_uncore(rps);
1130 struct drm_i915_private *i915 = rps_to_i915(rps);
1131 u32 val;
1132
1133 /* 1: Program defaults and thresholds for RPS*/
1134 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
1135 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
1136 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
1137 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
1138 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
1139
1140 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
1141
1142 /* 2: Enable RPS */
1143 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
1144 GEN6_RP_MEDIA_HW_NORMAL_MODE |
1145 GEN6_RP_MEDIA_IS_GFX |
1146 GEN6_RP_ENABLE |
1147 GEN6_RP_UP_BUSY_AVG |
1148 GEN6_RP_DOWN_IDLE_AVG);
1149
1150 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
1151 GEN6_PM_RP_DOWN_THRESHOLD |
1152 GEN6_PM_RP_DOWN_TIMEOUT);
1153
1154 /* Setting Fixed Bias */
1155 vlv_punit_get(i915);
1156
1157 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
1158 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
1159
1160 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
1161
1162 vlv_punit_put(i915);
1163
1164 /* RPS code assumes GPLL is used */
1165 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
1166 "GPLL not enabled\n");
1167
1168 drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
1169 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
1170
1171 return rps_reset(rps);
1172 }
1173
1174 static int vlv_rps_guar_freq(struct intel_rps *rps)
1175 {
1176 struct drm_i915_private *i915 = rps_to_i915(rps);
1177 u32 val, rp1;
1178
1179 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
1180
1181 rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
1182 rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
1183
1184 return rp1;
1185 }
1186
1187 static int vlv_rps_max_freq(struct intel_rps *rps)
1188 {
1189 struct drm_i915_private *i915 = rps_to_i915(rps);
1190 u32 val, rp0;
1191
1192 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
1193
1194 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
1195 /* Clamp to max */
1196 rp0 = min_t(u32, rp0, 0xea);
1197
1198 return rp0;
1199 }
1200
1201 static int vlv_rps_rpe_freq(struct intel_rps *rps)
1202 {
1203 struct drm_i915_private *i915 = rps_to_i915(rps);
1204 u32 val, rpe;
1205
1206 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
1207 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
1208 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
1209 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
1210
1211 return rpe;
1212 }
1213
1214 static int vlv_rps_min_freq(struct intel_rps *rps)
1215 {
1216 struct drm_i915_private *i915 = rps_to_i915(rps);
1217 u32 val;
1218
1219 val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
1220 /*
1221 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
1222 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
1223 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
1224 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
1225 * to make sure it matches what Punit accepts.
1226 */
1227 return max_t(u32, val, 0xc0);
1228 }
1229
1230 static bool vlv_rps_enable(struct intel_rps *rps)
1231 {
1232 struct intel_uncore *uncore = rps_to_uncore(rps);
1233 struct drm_i915_private *i915 = rps_to_i915(rps);
1234 u32 val;
1235
1236 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
1237 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
1238 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
1239 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
1240 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
1241
1242 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
1243
1244 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
1245 GEN6_RP_MEDIA_TURBO |
1246 GEN6_RP_MEDIA_HW_NORMAL_MODE |
1247 GEN6_RP_MEDIA_IS_GFX |
1248 GEN6_RP_ENABLE |
1249 GEN6_RP_UP_BUSY_AVG |
1250 GEN6_RP_DOWN_IDLE_CONT);
1251
1252 /* WaGsvRC0ResidencyMethod:vlv */
1253 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
1254
1255 vlv_punit_get(i915);
1256
1257 /* Setting Fixed Bias */
1258 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
1259 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
1260
1261 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
1262
1263 vlv_punit_put(i915);
1264
1265 /* RPS code assumes GPLL is used */
1266 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
1267 "GPLL not enabled\n");
1268
1269 drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
1270 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
1271
1272 return rps_reset(rps);
1273 }
1274
1275 static unsigned long __ips_gfx_val(struct intel_ips *ips)
1276 {
1277 struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
1278 struct intel_uncore *uncore = rps_to_uncore(rps);
1279 unsigned long t, corr, state1, corr2, state2;
1280 u32 pxvid, ext_v;
1281
1282 lockdep_assert_held(&mchdev_lock);
1283
1284 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
1285 pxvid = (pxvid >> 24) & 0x7f;
1286 ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
1287
1288 state1 = ext_v;
1289
1290 /* Revel in the empirically derived constants */
1291
1292 /* Correction factor in 1/100000 units */
1293 t = ips_mch_val(uncore);
1294 if (t > 80)
1295 corr = t * 2349 + 135940;
1296 else if (t >= 50)
1297 corr = t * 964 + 29317;
1298 else /* < 50 */
1299 corr = t * 301 + 1004;
1300
1301 corr = corr * 150142 * state1 / 10000 - 78642;
1302 corr /= 100000;
1303 corr2 = corr * ips->corr;
1304
1305 state2 = corr2 * state1 / 10000;
1306 state2 /= 100; /* convert to mW */
1307
1308 __gen5_ips_update(ips);
1309
1310 return ips->gfx_power + state2;
1311 }
1312
1313 static bool has_busy_stats(struct intel_rps *rps)
1314 {
1315 struct intel_engine_cs *engine;
1316 enum intel_engine_id id;
1317
1318 for_each_engine(engine, rps_to_gt(rps), id) {
1319 if (!intel_engine_supports_stats(engine))
1320 return false;
1321 }
1322
1323 return true;
1324 }
1325
1326 void intel_rps_enable(struct intel_rps *rps)
1327 {
1328 struct drm_i915_private *i915 = rps_to_i915(rps);
1329 struct intel_uncore *uncore = rps_to_uncore(rps);
1330 bool enabled = false;
1331
1332 if (!HAS_RPS(i915))
1333 return;
1334
1335 intel_gt_check_clock_frequency(rps_to_gt(rps));
1336
1337 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1338 if (rps->max_freq <= rps->min_freq)
1339 /* leave disabled, no room for dynamic reclocking */;
1340 else if (IS_CHERRYVIEW(i915))
1341 enabled = chv_rps_enable(rps);
1342 else if (IS_VALLEYVIEW(i915))
1343 enabled = vlv_rps_enable(rps);
1344 else if (INTEL_GEN(i915) >= 9)
1345 enabled = gen9_rps_enable(rps);
1346 else if (INTEL_GEN(i915) >= 8)
1347 enabled = gen8_rps_enable(rps);
1348 else if (INTEL_GEN(i915) >= 6)
1349 enabled = gen6_rps_enable(rps);
1350 else if (IS_IRONLAKE_M(i915))
1351 enabled = gen5_rps_enable(rps);
1352 else
1353 MISSING_CASE(INTEL_GEN(i915));
1354 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1355 if (!enabled)
1356 return;
1357
1358 GT_TRACE(rps_to_gt(rps),
1359 "min:%x, max:%x, freq:[%d, %d]\n",
1360 rps->min_freq, rps->max_freq,
1361 intel_gpu_freq(rps, rps->min_freq),
1362 intel_gpu_freq(rps, rps->max_freq));
1363
1364 GEM_BUG_ON(rps->max_freq < rps->min_freq);
1365 GEM_BUG_ON(rps->idle_freq > rps->max_freq);
1366
1367 GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
1368 GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
1369
1370 if (has_busy_stats(rps))
1371 intel_rps_set_timer(rps);
1372 else if (INTEL_GEN(i915) >= 6)
1373 intel_rps_set_interrupts(rps);
1374 else
1375 /* Ironlake currently uses intel_ips.ko */ {}
1376
1377 intel_rps_set_enabled(rps);
1378 }
1379
1380 static void gen6_rps_disable(struct intel_rps *rps)
1381 {
1382 set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
1383 }
1384
1385 void intel_rps_disable(struct intel_rps *rps)
1386 {
1387 struct drm_i915_private *i915 = rps_to_i915(rps);
1388
1389 intel_rps_clear_enabled(rps);
1390 intel_rps_clear_interrupts(rps);
1391 intel_rps_clear_timer(rps);
1392
1393 if (INTEL_GEN(i915) >= 6)
1394 gen6_rps_disable(rps);
1395 else if (IS_IRONLAKE_M(i915))
1396 gen5_rps_disable(rps);
1397 }
1398
1399 static int byt_gpu_freq(struct intel_rps *rps, int val)
1400 {
1401 /*
1402 * N = val - 0xb7
1403 * Slow = Fast = GPLL ref * N
1404 */
1405 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
1406 }
1407
1408 static int byt_freq_opcode(struct intel_rps *rps, int val)
1409 {
1410 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
1411 }
1412
1413 static int chv_gpu_freq(struct intel_rps *rps, int val)
1414 {
1415 /*
1416 * N = val / 2
1417 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
1418 */
1419 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
1420 }
1421
1422 static int chv_freq_opcode(struct intel_rps *rps, int val)
1423 {
1424 /* CHV needs even values */
1425 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
1426 }
1427
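/*
 * intel_gpu_freq() and intel_freq_opcode() are (rounded) inverses of
 * each other, converting between hw frequency encodings and MHz.
 */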
1428 int intel_gpu_freq(struct intel_rps *rps, int val)
1429 {
1430 struct drm_i915_private *i915 = rps_to_i915(rps);
1431
1432 if (INTEL_GEN(i915) >= 9)
1433 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
1434 GEN9_FREQ_SCALER);
1435 else if (IS_CHERRYVIEW(i915))
1436 return chv_gpu_freq(rps, val);
1437 else if (IS_VALLEYVIEW(i915))
1438 return byt_gpu_freq(rps, val);
1439 else
1440 return val * GT_FREQUENCY_MULTIPLIER;
1441 }
1442
1443 int intel_freq_opcode(struct intel_rps *rps, int val)
1444 {
1445 struct drm_i915_private *i915 = rps_to_i915(rps);
1446
1447 if (INTEL_GEN(i915) >= 9)
1448 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
1449 GT_FREQUENCY_MULTIPLIER);
1450 else if (IS_CHERRYVIEW(i915))
1451 return chv_freq_opcode(rps, val);
1452 else if (IS_VALLEYVIEW(i915))
1453 return byt_freq_opcode(rps, val);
1454 else
1455 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
1456 }
1457
1458 static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
1459 {
1460 struct drm_i915_private *i915 = rps_to_i915(rps);
1461
1462 rps->gpll_ref_freq =
1463 vlv_get_cck_clock(i915, "GPLL ref",
1464 CCK_GPLL_CLOCK_CONTROL,
1465 i915->czclk_freq);
1466
1467 drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
1468 rps->gpll_ref_freq);
1469 }
1470
1471 static void vlv_rps_init(struct intel_rps *rps)
1472 {
1473 struct drm_i915_private *i915 = rps_to_i915(rps);
1474 u32 val;
1475
1476 vlv_iosf_sb_get(i915,
1477 BIT(VLV_IOSF_SB_PUNIT) |
1478 BIT(VLV_IOSF_SB_NC) |
1479 BIT(VLV_IOSF_SB_CCK));
1480
1481 vlv_init_gpll_ref_freq(rps);
1482
1483 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
1484 switch ((val >> 6) & 3) {
1485 case 0:
1486 case 1:
1487 i915->mem_freq = 800;
1488 break;
1489 case 2:
1490 i915->mem_freq = 1066;
1491 break;
1492 case 3:
1493 i915->mem_freq = 1333;
1494 break;
1495 }
1496 drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
1497
1498 rps->max_freq = vlv_rps_max_freq(rps);
1499 rps->rp0_freq = rps->max_freq;
1500 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
1501 intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
1502
1503 rps->efficient_freq = vlv_rps_rpe_freq(rps);
1504 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
1505 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
1506
1507 rps->rp1_freq = vlv_rps_guar_freq(rps);
1508 drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
1509 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
1510
1511 rps->min_freq = vlv_rps_min_freq(rps);
1512 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
1513 intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
1514
1515 vlv_iosf_sb_put(i915,
1516 BIT(VLV_IOSF_SB_PUNIT) |
1517 BIT(VLV_IOSF_SB_NC) |
1518 BIT(VLV_IOSF_SB_CCK));
1519 }
1520
1521 static void chv_rps_init(struct intel_rps *rps)
1522 {
1523 struct drm_i915_private *i915 = rps_to_i915(rps);
1524 u32 val;
1525
1526 vlv_iosf_sb_get(i915,
1527 BIT(VLV_IOSF_SB_PUNIT) |
1528 BIT(VLV_IOSF_SB_NC) |
1529 BIT(VLV_IOSF_SB_CCK));
1530
1531 vlv_init_gpll_ref_freq(rps);
1532
1533 val = vlv_cck_read(i915, CCK_FUSE_REG);
1534
1535 switch ((val >> 2) & 0x7) {
1536 case 3:
1537 i915->mem_freq = 2000;
1538 break;
1539 default:
1540 i915->mem_freq = 1600;
1541 break;
1542 }
1543 drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
1544
1545 rps->max_freq = chv_rps_max_freq(rps);
1546 rps->rp0_freq = rps->max_freq;
1547 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
1548 intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
1549
1550 rps->efficient_freq = chv_rps_rpe_freq(rps);
1551 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
1552 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
1553
1554 rps->rp1_freq = chv_rps_guar_freq(rps);
1555 drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
1556 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
1557
1558 rps->min_freq = chv_rps_min_freq(rps);
1559 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
1560 intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
1561
1562 vlv_iosf_sb_put(i915,
1563 BIT(VLV_IOSF_SB_PUNIT) |
1564 BIT(VLV_IOSF_SB_NC) |
1565 BIT(VLV_IOSF_SB_CCK));
1566
1567 drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
1568 rps->rp1_freq | rps->min_freq) & 1,
1569 "Odd GPU freq values\n");
1570 }
1571
1572 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
1573 {
1574 ei->ktime = ktime_get_raw();
1575 ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
1576 ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
1577 }
1578
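/*
 * WaGsvRC0ResidencyMethod: on VLV the up/down threshold events are
 * derived here from the render/media C0 residency counters, sampled on
 * each UP_EI_EXPIRED interrupt, rather than taken from the hw
 * threshold interrupts.
 */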
1579 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
1580 {
1581 struct intel_uncore *uncore = rps_to_uncore(rps);
1582 const struct intel_rps_ei *prev = &rps->ei;
1583 struct intel_rps_ei now;
1584 u32 events = 0;
1585
1586 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1587 return 0;
1588
1589 vlv_c0_read(uncore, &now);
1590
1591 if (prev->ktime) {
1592 u64 time, c0;
1593 u32 render, media;
1594
1595 time = ktime_us_delta(now.ktime, prev->ktime);
1596
1597 time *= rps_to_i915(rps)->czclk_freq;
1598
1599 /* Workload can be split between render + media,
1600 * e.g. SwapBuffers being blitted in X after being rendered in
1601 * mesa. To account for this we need to combine both engines
1602 * into our activity counter.
1603 */
1604 render = now.render_c0 - prev->render_c0;
1605 media = now.media_c0 - prev->media_c0;
1606 c0 = max(render, media);
1607 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1608
1609 if (c0 > time * rps->power.up_threshold)
1610 events = GEN6_PM_RP_UP_THRESHOLD;
1611 else if (c0 < time * rps->power.down_threshold)
1612 events = GEN6_PM_RP_DOWN_THRESHOLD;
1613 }
1614
1615 rps->ei = now;
1616 return events;
1617 }
1618
1619 static void rps_work(struct work_struct *work)
1620 {
1621 struct intel_rps *rps = container_of(work, typeof(*rps), work);
1622 struct intel_gt *gt = rps_to_gt(rps);
1623 struct drm_i915_private *i915 = rps_to_i915(rps);
1624 bool client_boost = false;
1625 int new_freq, adj, min, max;
1626 u32 pm_iir = 0;
1627
1628 spin_lock_irq(&gt->irq_lock);
1629 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
1630 client_boost = atomic_read(&rps->num_waiters);
1631 spin_unlock_irq(&gt->irq_lock);
1632
1633 /* Make sure we didn't queue anything we're not going to process. */
1634 if (!pm_iir && !client_boost)
1635 goto out;
1636
1637 mutex_lock(&rps->lock);
1638 if (!intel_rps_is_active(rps)) {
1639 mutex_unlock(&rps->lock);
1640 return;
1641 }
1642
1643 pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
1644
1645 adj = rps->last_adj;
1646 new_freq = rps->cur_freq;
1647 min = rps->min_freq_softlimit;
1648 max = rps->max_freq_softlimit;
1649 if (client_boost)
1650 max = rps->max_freq;
1651
1652 GT_TRACE(gt,
1653 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
1654 pm_iir, yesno(client_boost),
1655 adj, new_freq, min, max);
1656
1657 if (client_boost && new_freq < rps->boost_freq) {
1658 new_freq = rps->boost_freq;
1659 adj = 0;
1660 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1661 if (adj > 0)
1662 adj *= 2;
1663 else /* CHV needs even encode values */
1664 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
1665
1666 if (new_freq >= rps->max_freq_softlimit)
1667 adj = 0;
1668 } else if (client_boost) {
1669 adj = 0;
1670 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1671 if (rps->cur_freq > rps->efficient_freq)
1672 new_freq = rps->efficient_freq;
1673 else if (rps->cur_freq > rps->min_freq_softlimit)
1674 new_freq = rps->min_freq_softlimit;
1675 adj = 0;
1676 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1677 if (adj < 0)
1678 adj *= 2;
1679 else /* CHV needs even encode values */
1680 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
1681
1682 if (new_freq <= rps->min_freq_softlimit)
1683 adj = 0;
1684 } else { /* unknown event */
1685 adj = 0;
1686 }
1687
1688 /*
1689 * sysfs frequency limits may have snuck in while
1690 * servicing the interrupt
1691 */
1692 new_freq += adj;
1693 new_freq = clamp_t(int, new_freq, min, max);
1694
1695 if (intel_rps_set(rps, new_freq)) {
1696 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
1697 adj = 0;
1698 }
1699 rps->last_adj = adj;
1700
1701 mutex_unlock(&rps->lock);
1702
1703 out:
1704 spin_lock_irq(&gt->irq_lock);
1705 gen6_gt_pm_unmask_irq(gt, rps->pm_events);
1706 spin_unlock_irq(&gt->irq_lock);
1707 }
1708
1709 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
1710 {
1711 struct intel_gt *gt = rps_to_gt(rps);
1712 const u32 events = rps->pm_events & pm_iir;
1713
1714 lockdep_assert_held(&gt->irq_lock);
1715
1716 if (unlikely(!events))
1717 return;
1718
1719 GT_TRACE(gt, "irq events:%x\n", events);
1720
1721 gen6_gt_pm_mask_irq(gt, events);
1722
1723 rps->pm_iir |= events;
1724 schedule_work(&rps->work);
1725 }
1726
1727 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
1728 {
1729 struct intel_gt *gt = rps_to_gt(rps);
1730 u32 events;
1731
1732 events = pm_iir & rps->pm_events;
1733 if (events) {
1734 spin_lock(&gt->irq_lock);
1735
1736 GT_TRACE(gt, "irq events:%x\n", events);
1737
1738 gen6_gt_pm_mask_irq(gt, events);
1739 rps->pm_iir |= events;
1740
1741 schedule_work(&rps->work);
1742 spin_unlock(&gt->irq_lock);
1743 }
1744
1745 if (INTEL_GEN(gt->i915) >= 8)
1746 return;
1747
1748 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1749 intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
1750
1751 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1752 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1753 }
1754
1755 void gen5_rps_irq_handler(struct intel_rps *rps)
1756 {
1757 struct intel_uncore *uncore = rps_to_uncore(rps);
1758 u32 busy_up, busy_down, max_avg, min_avg;
1759 u8 new_freq;
1760
1761 spin_lock(&mchdev_lock);
1762
1763 intel_uncore_write16(uncore,
1764 MEMINTRSTS,
1765 intel_uncore_read(uncore, MEMINTRSTS));
1766
1767 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
1768 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
1769 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
1770 max_avg = intel_uncore_read(uncore, RCBMAXAVG);
1771 min_avg = intel_uncore_read(uncore, RCBMINAVG);
1772
1773 /* Handle RCS change request from hw */
1774 new_freq = rps->cur_freq;
1775 if (busy_up > max_avg)
1776 new_freq++;
1777 else if (busy_down < min_avg)
1778 new_freq--;
1779 new_freq = clamp(new_freq,
1780 rps->min_freq_softlimit,
1781 rps->max_freq_softlimit);
1782
1783 if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
1784 rps->cur_freq = new_freq;
1785
1786 spin_unlock(&mchdev_lock);
1787 }
1788
1789 void intel_rps_init_early(struct intel_rps *rps)
1790 {
1791 mutex_init(&rps->lock);
1792 mutex_init(&rps->power.mutex);
1793
1794 INIT_WORK(&rps->work, rps_work);
1795 timer_setup(&rps->timer, rps_timer, 0);
1796
1797 atomic_set(&rps->num_waiters, 0);
1798 }
1799
1800 void intel_rps_init(struct intel_rps *rps)
1801 {
1802 struct drm_i915_private *i915 = rps_to_i915(rps);
1803
1804 if (IS_CHERRYVIEW(i915))
1805 chv_rps_init(rps);
1806 else if (IS_VALLEYVIEW(i915))
1807 vlv_rps_init(rps);
1808 else if (INTEL_GEN(i915) >= 6)
1809 gen6_rps_init(rps);
1810 else if (IS_IRONLAKE_M(i915))
1811 gen5_rps_init(rps);
1812
1813 /* Derive initial user preferences/limits from the hardware limits */
1814 rps->max_freq_softlimit = rps->max_freq;
1815 rps->min_freq_softlimit = rps->min_freq;
1816
1817 /* After setting max-softlimit, find the overclock max freq */
1818 if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
1819 u32 params = 0;
1820
1821 sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
1822 &params, NULL);
1823 if (params & BIT(31)) { /* OC supported */
1824 drm_dbg(&i915->drm,
1825 "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
1826 (rps->max_freq & 0xff) * 50,
1827 (params & 0xff) * 50);
1828 rps->max_freq = params & 0xff;
1829 }
1830 }
1831
1832 /* Finally allow us to boost to max by default */
1833 rps->boost_freq = rps->max_freq;
1834 rps->idle_freq = rps->min_freq;
1835
1836 /* Start in the middle, from here we will autotune based on workload */
1837 rps->cur_freq = rps->efficient_freq;
1838
1839 rps->pm_intrmsk_mbz = 0;
1840
1841 /*
1842 * SNB, IVB and HSW can (and VLV, CHV may) hard hang on a looping
1843 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
1844 *
1845 * TODO: verify if this can be reproduced on VLV,CHV.
1846 */
1847 if (INTEL_GEN(i915) <= 7)
1848 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
1849
1850 if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
1851 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1852 }
1853
1854 void intel_rps_sanitize(struct intel_rps *rps)
1855 {
1856 if (INTEL_GEN(rps_to_i915(rps)) >= 6)
1857 rps_disable_interrupts(rps);
1858 }
1859
1860 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
1861 {
1862 struct drm_i915_private *i915 = rps_to_i915(rps);
1863 u32 cagf;
1864
1865 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1866 cagf = (rpstat >> 8) & 0xff;
1867 else if (INTEL_GEN(i915) >= 9)
1868 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1869 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1870 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1871 else
1872 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1873
1874 return cagf;
1875 }
1876
1877 static u32 read_cagf(struct intel_rps *rps)
1878 {
1879 struct drm_i915_private *i915 = rps_to_i915(rps);
1880 u32 freq;
1881
1882 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
1883 vlv_punit_get(i915);
1884 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
1885 vlv_punit_put(i915);
1886 } else {
1887 freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
1888 }
1889
1890 return intel_rps_get_cagf(rps, freq);
1891 }
1892
1893 u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
1894 {
1895 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
1896 intel_wakeref_t wakeref;
1897 u32 freq = 0;
1898
1899 with_intel_runtime_pm_if_in_use(rpm, wakeref)
1900 freq = intel_gpu_freq(rps, read_cagf(rps));
1901
1902 return freq;
1903 }
1904
1905 /* External interface for intel_ips.ko */
1906
1907 static struct drm_i915_private __rcu *ips_mchdev;
1908
1909 /**
1910 * Tells the intel_ips driver that the i915 driver is now loaded, if
1911 * IPS got loaded first.
1912 *
1913 * This awkward dance is so that neither module has to depend on the
1914 * other in order for IPS to do the appropriate communication of
1915 * GPU turbo limits to i915.
1916 */
1917 static void
1918 ips_ping_for_i915_load(void)
1919 {
1920 void (*link)(void);
1921
1922 link = symbol_get(ips_link_to_i915_driver);
1923 if (link) {
1924 link();
1925 symbol_put(ips_link_to_i915_driver);
1926 }
1927 }
1928
1929 void intel_rps_driver_register(struct intel_rps *rps)
1930 {
1931 struct intel_gt *gt = rps_to_gt(rps);
1932
1933 /*
1934 * We only register the i915 ips part with intel-ips once everything is
1935 * set up, to avoid intel-ips sneaking in and reading bogus values.
1936 */
1937 if (IS_GEN(gt->i915, 5)) {
1938 GEM_BUG_ON(ips_mchdev);
1939 rcu_assign_pointer(ips_mchdev, gt->i915);
1940 ips_ping_for_i915_load();
1941 }
1942 }
1943
1944 void intel_rps_driver_unregister(struct intel_rps *rps)
1945 {
1946 if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
1947 rcu_assign_pointer(ips_mchdev, NULL);
1948 }
1949
1950 static struct drm_i915_private *mchdev_get(void)
1951 {
1952 struct drm_i915_private *i915;
1953
1954 rcu_read_lock();
1955 i915 = rcu_dereference(ips_mchdev);
1956 if (!kref_get_unless_zero(&i915->drm.ref))
1957 i915 = NULL;
1958 rcu_read_unlock();
1959
1960 return i915;
1961 }
1962
1963 /**
1964 * i915_read_mch_val - return value for IPS use
1965 *
1966 * Calculate and return a value for the IPS driver to use when deciding whether
1967 * we have thermal and power headroom to increase CPU or GPU power budget.
1968 */
1969 unsigned long i915_read_mch_val(void)
1970 {
1971 struct drm_i915_private *i915;
1972 unsigned long chipset_val = 0;
1973 unsigned long graphics_val = 0;
1974 intel_wakeref_t wakeref;
1975
1976 i915 = mchdev_get();
1977 if (!i915)
1978 return 0;
1979
1980 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1981 struct intel_ips *ips = &i915->gt.rps.ips;
1982
1983 spin_lock_irq(&mchdev_lock);
1984 chipset_val = __ips_chipset_val(ips);
1985 graphics_val = __ips_gfx_val(ips);
1986 spin_unlock_irq(&mchdev_lock);
1987 }
1988
1989 drm_dev_put(&i915->drm);
1990 return chipset_val + graphics_val;
1991 }
1992 EXPORT_SYMBOL_GPL(i915_read_mch_val);
1993
1994 /**
1995 * i915_gpu_raise - raise GPU frequency limit
1996 *
1997 * Raise the limit; IPS indicates we have thermal headroom.
1998 */
1999 bool i915_gpu_raise(void)
2000 {
2001 struct drm_i915_private *i915;
2002 struct intel_rps *rps;
2003
2004 i915 = mchdev_get();
2005 if (!i915)
2006 return false;
2007
2008 rps = &i915->gt.rps;
2009
2010 spin_lock_irq(&mchdev_lock);
2011 if (rps->max_freq_softlimit < rps->max_freq)
2012 rps->max_freq_softlimit++;
2013 spin_unlock_irq(&mchdev_lock);
2014
2015 drm_dev_put(&i915->drm);
2016 return true;
2017 }
2018 EXPORT_SYMBOL_GPL(i915_gpu_raise);
2019
2020 /**
2021 * i915_gpu_lower - lower GPU frequency limit
2022 *
2023 * IPS indicates we're close to a thermal limit, so throttle back the GPU
2024 * frequency maximum.
2025 */
2026 bool i915_gpu_lower(void)
2027 {
2028 struct drm_i915_private *i915;
2029 struct intel_rps *rps;
2030
2031 i915 = mchdev_get();
2032 if (!i915)
2033 return false;
2034
2035 rps = &i915->gt.rps;
2036
2037 spin_lock_irq(&mchdev_lock);
2038 if (rps->max_freq_softlimit > rps->min_freq)
2039 rps->max_freq_softlimit--;
2040 spin_unlock_irq(&mchdev_lock);
2041
2042 drm_dev_put(&i915->drm);
2043 return true;
2044 }
2045 EXPORT_SYMBOL_GPL(i915_gpu_lower);
2046
2047 /**
2048 * i915_gpu_busy - indicate GPU business to IPS
2049 *
2050 * Tell the IPS driver whether or not the GPU is busy.
2051 */
2052 bool i915_gpu_busy(void)
2053 {
2054 struct drm_i915_private *i915;
2055 bool ret;
2056
2057 i915 = mchdev_get();
2058 if (!i915)
2059 return false;
2060
2061 ret = i915->gt.awake;
2062
2063 drm_dev_put(&i915->drm);
2064 return ret;
2065 }
2066 EXPORT_SYMBOL_GPL(i915_gpu_busy);
2067
2068 /**
2069 * i915_gpu_turbo_disable - disable graphics turbo
2070 *
2071 * Disable graphics turbo by resetting the max frequency and setting the
2072 * current frequency to the default.
2073 */
2074 bool i915_gpu_turbo_disable(void)
2075 {
2076 struct drm_i915_private *i915;
2077 struct intel_rps *rps;
2078 bool ret;
2079
2080 i915 = mchdev_get();
2081 if (!i915)
2082 return false;
2083
2084 rps = &i915->gt.rps;
2085
2086 spin_lock_irq(&mchdev_lock);
2087 rps->max_freq_softlimit = rps->min_freq;
2088 ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
2089 spin_unlock_irq(&mchdev_lock);
2090
2091 drm_dev_put(&i915->drm);
2092 return ret;
2093 }
2094 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
2095
2096 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2097 #include "selftest_rps.c"
2098 #endif
2099