// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

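/*
 * vkms_vblank_simulate() - hrtimer callback standing in for the hardware
 * vblank interrupt. It rolls the timer forward by one frame period, signals
 * the vblank to the DRM core and, when composing (CRC generation) is
 * enabled, hands the current composer state to the ordered workqueue.
 */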
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vkms_output *output = container_of(timer, struct vkms_output,
						  vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_crtc_state *state;
	u64 ret_overrun;
	bool ret;

	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	if (ret_overrun != 1)
		pr_warn("%s: vblank timer overrun\n", __func__);

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank\n");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/*
		 * update frame_start only if a queued vkms_composer_worker()
		 * has read the data
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	return HRTIMER_RESTART;
}

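/*
 * .enable_vblank hook: derive the frame period from the current mode and
 * start the simulation hrtimer with that period.
 */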
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	out->vblank_hrtimer.function = &vkms_vblank_simulate;
	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

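/* .disable_vblank hook: stop vblank simulation by cancelling the hrtimer. */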
static void vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	hrtimer_cancel(&out->vblank_hrtimer);
}

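/*
 * .get_vblank_timestamp hook: derive the vblank timestamp from the
 * simulation hrtimer rather than from real scanout hardware.
 */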
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
				      int *max_error, ktime_t *vblank_time,
				      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

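/*
 * .atomic_duplicate_state hook: duplicate the base CRTC state into a fresh
 * vkms_crtc_state and (re)initialize its composer work item.
 */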
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

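/*
 * .atomic_destroy_state hook: release a vkms_crtc_state. By the time the
 * state is destroyed its composer work must no longer be pending.
 */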
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

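/*
 * .reset hook: install a zeroed software state. __drm_atomic_helper_crtc_reset()
 * copes with a NULL state, so a failed allocation degrades gracefully (this
 * relies on base being the first member of struct vkms_crtc_state).
 */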
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

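/* Core CRTC entry points; modesets and page flips go through the atomic helpers. */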
static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
	.enable_vblank = vkms_enable_vblank,
	.disable_vblank = vkms_disable_vblank,
	.get_vblank_timestamp = vkms_get_vblank_timestamp,
	.get_crc_sources = vkms_get_crc_sources,
	.set_crc_source = vkms_set_crc_source,
	.verify_crc_source = vkms_verify_crc_source,
};

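/*
 * .atomic_check hook: count the visible planes on this CRTC and cache their
 * vkms_plane_state pointers in active_planes, so the composer can walk them
 * without going back through the atomic state.
 */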
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(state->state, crtc);
	if (ret < 0)
		return ret;

	drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(state->state,
								  plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(state->state,
								  plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

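/* Enabling/disabling the CRTC simply turns vblank handling on and off. */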
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	drm_crtc_vblank_off(crtc);
}

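/*
 * atomic_begin/atomic_flush bracket the commit: the output lock taken here
 * is released at the end of vkms_crtc_atomic_flush() below.
 */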
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/*
	 * This lock is held across the atomic commit to block the vblank
	 * timer from scheduling vkms_composer_worker until the composer is
	 * updated
	 */
	spin_lock_irq(&vkms_output->lock);
}

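/*
 * .atomic_flush hook: send or arm the pending page-flip/modeset event,
 * publish the new state for the composer, then drop the lock taken in
 * vkms_crtc_atomic_begin().
 */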
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check = vkms_crtc_atomic_check,
	.atomic_begin = vkms_crtc_atomic_begin,
	.atomic_flush = vkms_crtc_atomic_flush,
	.atomic_enable = vkms_crtc_atomic_enable,
	.atomic_disable = vkms_crtc_atomic_disable,
};

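/*
 * vkms_crtc_init() - register the CRTC with its primary and cursor planes,
 * wire up the helper funcs, initialize the locks and create the ordered
 * workqueue used for composing/CRC generation.
 */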
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
		   struct drm_plane *primary, struct drm_plane *cursor)
{
	struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
	if (!vkms_out->composer_workq)
		return -ENOMEM;

	return ret;
}