// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

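/*
 * get_pixel_from_buffer - read one pixel from a mapped framebuffer
 *
 * Returns the pixel at (x, y), located via the byte offset, pitch (bytes per
 * scanline) and cpp (bytes per pixel) stored in @composer. The pixel is read
 * as a raw 32-bit value, so this assumes a 4-byte-per-pixel format such as
 * the XRGB8888/ARGB8888 formats vkms exposes.
 */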
static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
				 const struct vkms_composer *composer)
{
	u32 pixel;
	int src_offset = composer->offset + (y * composer->pitch)
			 + (x * composer->cpp);

	pixel = *(u32 *)&buffer[src_offset];

	return pixel;
}

/**
 * compute_crc - Compute CRC value on output frame
 *
 * @vaddr: address of the final framebuffer
 * @composer: framebuffer's metadata
 *
 * Returns: the CRC value, computed with crc32 over the visible portion of
 * the final framebuffer at @vaddr.
 */
static uint32_t compute_crc(const u8 *vaddr,
			    const struct vkms_composer *composer)
{
	int x, y;
	u32 crc = 0, pixel = 0;
	int x_src = composer->src.x1 >> 16;
	int y_src = composer->src.y1 >> 16;
	int h_src = drm_rect_height(&composer->src) >> 16;
	int w_src = drm_rect_width(&composer->src) >> 16;

	for (y = y_src; y < y_src + h_src; ++y) {
		for (x = x_src; x < x_src + w_src; ++x) {
			pixel = get_pixel_from_buffer(x, y, vaddr, composer);
			crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
		}
	}

	return crc;
}

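/*
 * blend_channel - blend one 8-bit color channel
 *
 * Computes src + dst * (1 - alpha) for a single channel, assuming @src has
 * already been pre-multiplied by its alpha value. The shift-and-add sequence
 * in the body approximates division by 255 without using a divide
 * instruction.
 */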
static u8 blend_channel(u8 src, u8 dst, u8 alpha)
{
	u32 pre_blend;
	u8 new_color;

	pre_blend = (src * 255 + dst * (255 - alpha));

	/* Faster div by 255 */
	new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);

	return new_color;
}

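/*
 * alpha_blending - blend a source pixel over a destination pixel
 *
 * Blends the three color bytes of @argb_src over @argb_dst using the source
 * alpha stored in byte 3, then forces the result fully opaque, since the
 * primary plane underneath carries no alpha of its own.
 */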
static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
{
	u8 alpha;

	alpha = argb_src[3];
	argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
	argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
	argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
	/* Opaque primary */
	argb_dst[3] = 0xFF;
}

/**
 * blend - blend the pixels at vaddr_src over the pixels at vaddr_dst
 * @vaddr_dst: destination address
 * @vaddr_src: source address
 * @dst_composer: destination framebuffer's metadata
 * @src_composer: source framebuffer's metadata
 *
 * Blend the vaddr_src values over the vaddr_dst values using the
 * pre-multiplied alpha blending equation, since DRM currently assumes that
 * the pixel color values have already been pre-multiplied with the alpha
 * channel values. For more information, see
 * drm_plane_create_blend_mode_property(). This function uses the buffers'
 * metadata to locate the resulting composite values at vaddr_dst.
 */
static void blend(void *vaddr_dst, void *vaddr_src,
		  struct vkms_composer *dst_composer,
		  struct vkms_composer *src_composer)
{
	int i, j, j_dst, i_dst;
	int offset_src, offset_dst;
	u8 *pixel_dst, *pixel_src;

	int x_src = src_composer->src.x1 >> 16;
	int y_src = src_composer->src.y1 >> 16;

	int x_dst = src_composer->dst.x1;
	int y_dst = src_composer->dst.y1;
	int h_dst = drm_rect_height(&src_composer->dst);
	int w_dst = drm_rect_width(&src_composer->dst);

	int y_limit = y_src + h_dst;
	int x_limit = x_src + w_dst;

	for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
		for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
			offset_dst = dst_composer->offset
				     + (i_dst * dst_composer->pitch)
				     + (j_dst++ * dst_composer->cpp);
			offset_src = src_composer->offset
				     + (i * src_composer->pitch)
				     + (j * src_composer->cpp);

			pixel_src = (u8 *)(vaddr_src + offset_src);
			pixel_dst = (u8 *)(vaddr_dst + offset_dst);
			alpha_blending(pixel_src, pixel_dst);
		}
		i_dst++;
	}
}

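/*
 * compose_cursor - blend the cursor plane over the output frame
 *
 * Looks up the cursor plane's backing GEM object and blends its mapping over
 * the already-copied primary plane in @vaddr_out. Bails out with a warning if
 * the cursor buffer was never vmapped.
 */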
static void compose_cursor(struct vkms_composer *cursor_composer,
			   struct vkms_composer *primary_composer,
			   void *vaddr_out)
{
	struct drm_gem_object *cursor_obj;
	struct vkms_gem_object *cursor_vkms_obj;

	cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
	cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);

	if (WARN_ON(!cursor_vkms_obj->vaddr))
		return;

	blend(vaddr_out, cursor_vkms_obj->vaddr,
	      primary_composer, cursor_composer);
}

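/*
 * compose_planes - assemble the output frame from the active planes
 *
 * Allocates *@vaddr_out if the caller did not supply a buffer (the writeback
 * path passes one in), copies the primary plane into it and, if a cursor
 * plane is enabled, blends the cursor on top.
 *
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */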
static int compose_planes(void **vaddr_out,
			  struct vkms_composer *primary_composer,
			  struct vkms_composer *cursor_composer)
{
	struct drm_framebuffer *fb = &primary_composer->fb;
	struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);

	if (!*vaddr_out) {
		*vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
		if (!*vaddr_out) {
			DRM_ERROR("Cannot allocate memory for output frame.");
			return -ENOMEM;
		}
	}

	if (WARN_ON(!vkms_obj->vaddr))
		return -EINVAL;

	memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);

	if (cursor_composer)
		compose_cursor(cursor_composer, primary_composer, *vaddr_out);

	return 0;
}

/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. The work_struct is queued
 * on an ordered workqueue, periodically scheduled to run by _vblank_handle()
 * and flushed at vkms_atomic_crtc_destroy_state().
 */
void vkms_composer_worker(struct work_struct *work)
{
	struct vkms_crtc_state *crtc_state = container_of(work,
						struct vkms_crtc_state,
						composer_work);
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	struct vkms_composer *primary_composer = NULL;
	struct vkms_composer *cursor_composer = NULL;
	bool crc_pending, wb_pending;
	void *vaddr_out = NULL;
	u32 crc32 = 0;
	u64 frame_start, frame_end;
	int ret;

	spin_lock_irq(&out->composer_lock);
	frame_start = crtc_state->frame_start;
	frame_end = crtc_state->frame_end;
	crc_pending = crtc_state->crc_pending;
	wb_pending = crtc_state->wb_pending;
	crtc_state->frame_start = 0;
	crtc_state->frame_end = 0;
	crtc_state->crc_pending = false;
	spin_unlock_irq(&out->composer_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	if (crtc_state->num_active_planes >= 1)
		primary_composer = crtc_state->active_planes[0]->composer;

	if (crtc_state->num_active_planes == 2)
		cursor_composer = crtc_state->active_planes[1]->composer;

	if (!primary_composer)
		return;

	if (wb_pending)
		vaddr_out = crtc_state->active_writeback;

	ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
	if (ret) {
		if (ret == -EINVAL && !wb_pending)
			kfree(vaddr_out);
		return;
	}

	crc32 = compute_crc(vaddr_out, primary_composer);

	if (wb_pending) {
		drm_writeback_signal_completion(&out->wb_connector, 0);
		spin_lock_irq(&out->composer_lock);
		crtc_state->wb_pending = false;
		spin_unlock_irq(&out->composer_lock);
	} else {
		kfree(vaddr_out);
	}

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

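/*
 * CRC source handling for the debugfs CRC API. "auto" is the only source
 * vkms exposes; vkms_get_crc_sources() simply reports that list.
 */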
static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

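/*
 * vkms_crc_parse_source - map a CRC source name to an enable decision
 *
 * A NULL source name disables CRC generation, "auto" enables it, and any
 * other string is rejected with -EINVAL.
 */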
static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

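/*
 * vkms_verify_crc_source - validate a CRC source requested from userspace
 *
 * Rejects unknown source names and reports that vkms produces a single CRC
 * value per frame.
 */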
int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

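/*
 * vkms_set_composer - enable or disable the composer
 *
 * Takes a vblank reference whenever the composer is enabled and drops the
 * reference held for a previously enabled composer, so that vblanks keep
 * firing while frames still need to be composed.
 */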
void vkms_set_composer(struct vkms_output *out, bool enabled)
{
	bool old_enabled;

	if (enabled)
		drm_crtc_vblank_get(&out->crtc);

	spin_lock_irq(&out->lock);
	old_enabled = out->composer_enabled;
	out->composer_enabled = enabled;
	spin_unlock_irq(&out->lock);

	if (old_enabled)
		drm_crtc_vblank_put(&out->crtc);
}

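/*
 * vkms_set_crc_source - debugfs hook for selecting the CRC source
 *
 * Called when userspace writes a new CRC source; turns the composer on for
 * "auto" and off when the source is cleared.
 */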
int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool enabled = false;
	int ret = 0;

	ret = vkms_crc_parse_source(src_name, &enabled);

	vkms_set_composer(out, enabled);

	return ret;
}