/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sort.h>

#include "intel_gt_clock_utils.h"

#include "selftest_llc.h"
#include "selftest_rc6.h"
#include "selftest_rps.h"

cmp_u64(const void * A,const void * B)16 static int cmp_u64(const void *A, const void *B)
17 {
18 const u64 *a = A, *b = B;
19
20 if (a < b)
21 return -1;
22 else if (a > b)
23 return 1;
24 else
25 return 0;
26 }
27
cmp_u32(const void * A,const void * B)28 static int cmp_u32(const void *A, const void *B)
29 {
30 const u32 *a = A, *b = B;
31
32 if (a < b)
33 return -1;
34 else if (a > b)
35 return 1;
36 else
37 return 0;
38 }
39
/*
 * Sample the engine's RING_TIMESTAMP register against the CPU clock
 * (ktime) over five ~1ms windows, and report a median-weighted value of
 * each so that a stray interrupt or scheduling glitch in any single
 * sample does not skew the result.
 */
static void measure_clocks(struct intel_engine_cs *engine,
			   u32 *out_cycles, ktime_t *out_dt)
{
	ktime_t dt[5];
	u32 cycles[5];
	int i;

	for (i = 0; i < 5; i++) {
		/* Keep the sampling window as tight as possible. */
		preempt_disable();
		/*
		 * Store -start now; adding the end timestamp below yields
		 * end - start in modular u32 arithmetic, which stays
		 * correct even if the 32bit counter wraps once mid-window.
		 */
		cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
		dt[i] = ktime_get();

		udelay(1000);

		dt[i] = ktime_sub(ktime_get(), dt[i]);
		cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
		preempt_enable();
	}

	/* Use the median of both cycle/dt; close enough */
	sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);
	*out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;

	sort(dt, 5, sizeof(*dt), cmp_u64, NULL);
	*out_dt = div_u64(dt[1] + 2 * dt[2] + dt[3], 4);
}
66
/*
 * Verify that the CS_TIMESTAMP engine clock ticks at the frequency we
 * believe it does: measure elapsed RING_TIMESTAMP cycles against CPU
 * walltime for every engine and check that each conversion helper agrees
 * with the direct measurement to within a 8:9 ratio (~12.5%) either way.
 */
static int live_gt_clocks(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
		pr_info("CS_TIMESTAMP frequency unknown\n");
		return 0;
	}

	if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
		return 0;

	if (IS_GEN(gt->i915, 5))
		/*
		 * XXX CS_TIMESTAMP low dword is dysfunctional?
		 *
		 * Ville's experiments indicate the high dword still works,
		 * but at a correspondingly reduced frequency.
		 */
		return 0;

	if (IS_GEN(gt->i915, 4))
		/*
		 * XXX CS_TIMESTAMP appears gibberish
		 *
		 * Ville's experiments indicate that it mostly appears 'stuck'
		 * in that we see the register report the same cycle count
		 * for a couple of reads.
		 */
		return 0;

	/*
	 * Hold a GT wakeref and all forcewake domains so the timestamp
	 * registers stay awake and readable for the whole measurement.
	 */
	intel_gt_pm_get(gt);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	for_each_engine(engine, gt, id) {
		u32 cycles;
		u32 expected;
		u64 time;
		u64 dt;

		/*
		 * NOTE(review): before gen7 only the render engine is
		 * sampled here — presumably the other engines lack a
		 * usable timestamp; confirm against hardware docs.
		 */
		if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
			continue;

		measure_clocks(engine, &cycles, &dt);

		/* Convert each measurement into the other's units... */
		time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
		expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);

		pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
			engine->name, cycles, time, dt, expected,
			RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);

		/* ...and insist the pair agree to within a 8:9 ratio. */
		if (9 * time < 8 * dt || 8 * time > 9 * dt) {
			pr_err("%s: CS ticks did not match walltime!\n",
			       engine->name);
			err = -EINVAL;
			break;
		}

		if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {
			pr_err("%s: walltime did not match CS ticks!\n",
			       engine->name);
			err = -EINVAL;
			break;
		}
	}

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);

	return err;
}
142
/*
 * Repeatedly suspend and resume the GT for the duration of the selftest
 * timeout, checking after each transition that rc6 is disabled while
 * suspended, re-enabled (where supported) after resume, and that the LLC
 * configuration survives the round trip. Any failure wedges the GT so
 * that subsequent tests do not run on broken state.
 */
static int live_gt_resume(void *arg)
{
	struct intel_gt *gt = arg;
	IGT_TIMEOUT(end_time);
	int err;

	/* Do several suspend/resume cycles to check we don't explode! */
	do {
		intel_gt_suspend_prepare(gt);
		intel_gt_suspend_late(gt);

		/* rc6 must be torn down by the suspend sequence. */
		if (gt->rc6.enabled) {
			pr_err("rc6 still enabled after suspend!\n");
			intel_gt_set_wedged_on_init(gt);
			err = -EINVAL;
			break;
		}

		err = intel_gt_resume(gt);
		if (err)
			break;

		/* ...and brought back by resume, when the hw supports it. */
		if (gt->rc6.supported && !gt->rc6.enabled) {
			pr_err("rc6 not enabled upon resume!\n");
			intel_gt_set_wedged_on_init(gt);
			err = -EINVAL;
			break;
		}

		/* LLC ring-frequency tables must also be restored. */
		err = st_llc_verify(&gt->llc);
		if (err) {
			pr_err("llc state not restored upon resume!\n");
			intel_gt_set_wedged_on_init(gt);
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	return err;
}
182
intel_gt_pm_live_selftests(struct drm_i915_private * i915)183 int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
184 {
185 static const struct i915_subtest tests[] = {
186 SUBTEST(live_gt_clocks),
187 SUBTEST(live_rc6_manual),
188 SUBTEST(live_rps_clock_interval),
189 SUBTEST(live_rps_control),
190 SUBTEST(live_rps_frequency_cs),
191 SUBTEST(live_rps_frequency_srm),
192 SUBTEST(live_rps_power),
193 SUBTEST(live_rps_interrupt),
194 SUBTEST(live_rps_dynamic),
195 SUBTEST(live_gt_resume),
196 };
197
198 if (intel_gt_is_wedged(&i915->gt))
199 return 0;
200
201 return intel_gt_live_subtests(tests, &i915->gt);
202 }
203
intel_gt_pm_late_selftests(struct drm_i915_private * i915)204 int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
205 {
206 static const struct i915_subtest tests[] = {
207 /*
208 * These tests may leave the system in an undesirable state.
209 * They are intended to be run last in CI and the system
210 * rebooted afterwards.
211 */
212 SUBTEST(live_rc6_ctx_wa),
213 };
214
215 if (intel_gt_is_wedged(&i915->gt))
216 return 0;
217
218 return intel_gt_live_subtests(tests, &i915->gt);
219 }
220