/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>


#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates,
 * x direction.
 * @dst_y: Difference between source clip rects and framebuffer coordinates,
 * y direction.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined;
};

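/**
 * vmw_sou_destroy - Clean up and free a screen object display unit.
 *
 * @sou: The screen object unit to destroy.
 *
 * Releases the resources held by the embedded display unit and frees
 * the containing structure.
 */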
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}


/*
 * Screen Object Display Unit CRTC functions
 */

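/**
 * vmw_sou_crtc_destroy - Cleanup a screen object CRTC.
 *
 * @crtc: The CRTC whose display unit is to be destroyed.
 */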
static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * vmw_sou_fifo_create - Send the fifo command to create a screen.
 *
 * @dev_priv: Pointer to the device private structure.
 * @sou: The screen object unit to define.
 * @x: Screen origin, x coordinate.
 * @y: Screen origin, y coordinate.
 * @mode: Display mode providing the screen dimensions.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}
	sou->base.set_gui_x = cmd->obj.root.x;
	sou->base.set_gui_y = cmd->obj.root.y;

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}

/**
 * vmw_sou_fifo_destroy - Send the fifo command to destroy a screen.
 *
 * @dev_priv: Pointer to the device private structure.
 * @sou: The screen object unit whose screen is to be destroyed.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW\n");
	else
		sou->defined = false;

	return ret;
}

/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. It cannot fail, so if we
 * somehow run into a failure anyway, we do the best we can to back out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_plane_state *ps;
	struct vmw_plane_state *vps;
	int ret;


	sou      = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);
	ps       = crtc->primary->state;
	fb       = ps->fb;
	vps      = vmw_plane_state_to_vps(ps);

	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret) {
			DRM_ERROR("Failed to destroy Screen Object\n");
			return;
		}
	}

	if (vfb) {
		sou->buffer = vps->dmabuf;
		sou->buffer_size = vps->dmabuf_size;

		ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
					  &crtc->mode);
		if (ret)
			DRM_ERROR("Failed to define Screen Object %dx%d\n",
				  crtc->x, crtc->y);

		vmw_kms_add_active(dev_priv, &sou->base, vfb);
	} else {
		sou->buffer = NULL;
		sou->buffer_size = 0;

		vmw_kms_del_active(dev_priv, &sou->base);
	}
}

/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_atomic_enable - Noop
 *
 * @crtc: CRTC associated with the new screen
 * @old_state: Unused
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
}

/**
 * vmw_sou_crtc_atomic_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 * @old_state: Unused
 */
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_crtc_state *old_state)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	int ret;


	if (!crtc) {
		DRM_ERROR("CRTC is NULL\n");
		return;
	}

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret)
			DRM_ERROR("Failed to destroy Screen Object\n");
	}
}

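/**
 * vmw_sou_crtc_page_flip - Flip the CRTC's primary plane to a new framebuffer.
 *
 * @crtc: CRTC to flip.
 * @new_fb: Framebuffer to flip to.
 * @event: Optional vblank event to signal once the flip has completed.
 * @flags: Page flip flags; async flips are not supported and are masked off.
 * @ctx: Modeset acquire context.
 *
 * Performs the flip through the atomic helpers, then issues a full-screen
 * dirty update so the new contents reach the host, fencing the update so
 * the optional event can be queued on fence completion.
 */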
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *new_fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_vmw_rect vclips;
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

	flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
	ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
	if (ret) {
		DRM_ERROR("Page flip error %d.\n", ret);
		return ret;
	}

	/* do a full screen dirty update */
	vclips.x = crtc->x;
	vclips.y = crtc->y;
	vclips.w = crtc->mode.hdisplay;
	vclips.h = crtc->mode.vdisplay;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  NULL, &vclips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   NULL, &vclips, NULL,
						   0, 0, 1, 1, &fence);


	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
	return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.reset = vmw_du_crtc_reset,
	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
	.atomic_destroy_state = vmw_du_crtc_destroy_state,
	.set_config = vmw_kms_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
	.reset = vmw_du_connector_reset,
	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
	.atomic_destroy_state = vmw_du_connector_destroy_state,
	.atomic_set_property = vmw_du_connector_atomic_set_property,
	.atomic_get_property = vmw_du_connector_atomic_get_property,
};


static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
	.best_encoder = drm_atomic_helper_best_encoder,
};



/*
 * Screen Object Display Plane Functions
 */

/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins and unreferences the backing buffer of the display plane.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	struct drm_crtc *crtc = plane->state->crtc ?
		plane->state->crtc : old_state->crtc;

	if (vps->dmabuf)
		vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
	vmw_dmabuf_unreference(&vps->dmabuf);
	vps->dmabuf_size = 0;

	vmw_du_plane_cleanup_fb(plane, old_state);
}


/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_private *dev_priv;
	size_t size;
	int ret;


	if (!new_fb) {
		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;

		return 0;
	}

	size = new_state->crtc_w * new_state->crtc_h * 4;
	dev_priv = vmw_priv(crtc->dev);

	if (vps->dmabuf) {
		if (vps->dmabuf_size == size) {
			/*
			 * Note that this might temporarily up the pin-count
			 * to 2, until cleanup_fb() is called.
			 */
			return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
						      true);
		}

		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;
	}

	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
	if (!vps->dmabuf)
		return -ENOMEM;

	vmw_svga_enable(dev_priv);

	/*
	 * After we have allocated the backing store we might not be able to
	 * resume the overlays; this is preferable to failing to allocate.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);
	if (ret) {
		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
		return ret;
	}

	vps->dmabuf_size = size;

	/*
	 * TTM already thinks the buffer is pinned, but make sure the
	 * pin_count is upped.
	 */
	return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
}


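/**
 * vmw_sou_primary_plane_atomic_update - Update the CRTC's framebuffer pointer.
 *
 * @plane: display plane
 * @old_state: Previous plane state, currently unused
 *
 * Points the CRTC's primary framebuffer at the framebuffer of the new plane
 * state. The actual screen (re)definition happens in
 * vmw_sou_crtc_mode_set_nofb().
 */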
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc;

	if (crtc)
		crtc->primary->fb = plane->state->fb;
}


static const struct drm_plane_funcs vmw_sou_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_primary_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_cursor_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
	.atomic_check = vmw_du_cursor_plane_atomic_check,
	.atomic_update = vmw_du_cursor_plane_atomic_update,
	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
	.cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
	.atomic_check = vmw_du_primary_plane_atomic_check,
	.atomic_update = vmw_sou_primary_plane_atomic_update,
	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
	.prepare = vmw_sou_crtc_helper_prepare,
	.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
	.atomic_check = vmw_du_crtc_atomic_check,
	.atomic_begin = vmw_du_crtc_atomic_begin,
	.atomic_flush = vmw_du_crtc_atomic_flush,
	.atomic_enable = vmw_sou_crtc_atomic_enable,
	.atomic_disable = vmw_sou_crtc_atomic_disable,
};


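/**
 * vmw_sou_init - Initialize a single screen object display unit.
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: Index of the display unit to create.
 *
 * Allocates the unit and sets up its primary and cursor planes, connector,
 * encoder and CRTC, including the connector properties. Returns 0 on
 * success, negative error code on failure.
 */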
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_plane *primary, *cursor;
	struct drm_crtc *crtc;
	int ret;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;
	primary = &sou->base.primary;
	cursor = &sou->base.cursor;

	sou->base.active_implicit = false;
	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;

	/*
	 * Remove this after enabling atomic because property values can
	 * only exist in a state object
	 */
	sou->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &sou->base.primary,
				       0, &vmw_sou_plane_funcs,
				       vmw_primary_plane_formats,
				       ARRAY_SIZE(vmw_primary_plane_formats),
				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize primary plane\n");
		goto err_free;
	}

	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &sou->base.cursor,
			0, &vmw_sou_cursor_funcs,
			vmw_cursor_plane_formats,
			ARRAY_SIZE(vmw_cursor_plane_formats),
			NULL, DRM_PLANE_TYPE_CURSOR, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize cursor plane\n");
		drm_plane_cleanup(&sou->base.primary);
		goto err_free;
	}

	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		goto err_free;
	}

	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
	connector->status = vmw_du_connector_detect(connector, true);
	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;


	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		goto err_free_connector;
	}

	(void) drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	ret = drm_connector_register(connector);
	if (ret) {
		DRM_ERROR("Failed to register connector\n");
		goto err_free_encoder;
	}


	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
					&sou->base.cursor,
					&vmw_screen_object_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize CRTC\n");
		goto err_free_unregister;
	}

	drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev_priv->hotplug_mode_update_property, 1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	if (dev_priv->implicit_placement_property)
		drm_object_attach_property
			(&connector->base,
			 dev_priv->implicit_placement_property,
			 sou->base.is_implicit);

	return 0;

err_free_unregister:
	drm_connector_unregister(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
err_free_connector:
	drm_connector_cleanup(connector);
err_free:
	kfree(sou);
	return ret;
}

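/**
 * vmw_kms_sou_init_display - Set up the screen object display units.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Verifies that the device supports screen objects, initializes vblank
 * handling and the implicit placement property, and creates one display
 * unit per supported output. Returns 0 on success, negative error code
 * on failure.
 */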
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->num_implicit = 0;
	dev_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_create_implicit_placement_property(dev_priv, false);

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;
}

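/**
 * do_dmabuf_define_gmrfb - Emit an SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: The dma-buffer backed framebuffer to use as the GMRFB.
 *
 * Points the device's GMRFB at the framebuffer's backing buffer so that
 * subsequent blit commands can transfer between it and the screen.
 * Returns 0 on success, negative error code on failure.
 */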
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.format->depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/*
	 * Emulate RGBA support: contrary to svga_reg.h, this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to be relative to the destination bounding box encoded in the command.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
	int i;

	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
	cmd->header.size = sizeof(cmd->body) + region_size;

	/*
	 * Use the destination bounding box to specify destination - and
	 * source bounding regions.
	 */
	cmd->body.destRect.left = sdirty->left;
	cmd->body.destRect.right = sdirty->right;
	cmd->body.destRect.top = sdirty->top;
	cmd->body.destRect.bottom = sdirty->bottom;

	cmd->body.srcRect.left = sdirty->left + trans_x;
	cmd->body.srcRect.right = sdirty->right + trans_x;
	cmd->body.srcRect.top = sdirty->top + trans_y;
	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

	cmd->body.srcImage.sid = sdirty->sid;
	cmd->body.destScreenId = dirty->unit->unit;

	/* Blits are relative to the destination rect. Translate. */
	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
		blit->left -= sdirty->left;
		blit->right -= sdirty->left;
		blit->top -= sdirty->top;
		blit->bottom -= sdirty->top;
	}

	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

	sdirty->left = sdirty->top = S32_MAX;
	sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

	/* Destination rect. */
	blit += dirty->num_hits;
	blit->left = dirty->unit_x1;
	blit->top = dirty->unit_y1;
	blit->right = dirty->unit_x2;
	blit->bottom = dirty->unit_y2;

	/* Destination bounding box */
	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
				 struct vmw_framebuffer *framebuffer,
				 struct drm_clip_rect *clips,
				 struct drm_vmw_rect *vclips,
				 struct vmw_resource *srf,
				 s32 dest_x,
				 s32 dest_y,
				 unsigned num_clips, int inc,
				 struct vmw_fence_obj **out_fence)
{
	struct vmw_framebuffer_surface *vfbs =
		container_of(framebuffer, typeof(*vfbs), base);
	struct vmw_kms_sou_surface_dirty sdirty;
	struct vmw_validation_ctx ctx;
	int ret;

	if (!srf)
		srf = &vfbs->surface->res;

	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
	if (ret)
		return ret;

	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
	sdirty.base.clip = vmw_sou_surface_clip;
	sdirty.base.dev_priv = dev_priv;
	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
	  sizeof(SVGASignedRect) * num_clips;

	sdirty.sid = srf->id;
	sdirty.left = sdirty.top = S32_MAX;
	sdirty.right = sdirty.bottom = S32_MIN;
	sdirty.dst_x = dest_x;
	sdirty.dst_y = dest_y;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   dest_x, dest_y, num_clips, inc,
				   &sdirty.base);
	vmw_kms_helper_resource_finish(&ctx, out_fence);

	return ret;
}

/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dma-buffer
 * blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blit clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a dma-buffer blit cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	blit->body.destScreenId = dirty->unit->unit;
	blit->body.srcOrigin.x = dirty->fb_x;
	blit->body.srcOrigin.y = dirty->fb_y;
	blit->body.destRect.left = dirty->unit_x1;
	blit->body.destRect.top = dirty->unit_y1;
	blit->body.destRect.right = dirty->unit_x2;
	blit->body.destRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips,
				struct drm_vmw_rect *vclips,
				unsigned num_clips, int increment,
				bool interruptible,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
					    false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
	dirty.clip = vmw_sou_dmabuf_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   0, 0, num_clips, increment, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}


/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_readback_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	blit->body.srcScreenId = dirty->unit->unit;
	blit->body.destOrigin.x = dirty->fb_x;
	blit->body.destOrigin.y = dirty->fb_y;
	blit->body.srcRect.left = dirty->unit_x1;
	blit->body.srcRect.top = dirty->unit_y1;
	blit->body.srcRect.right = dirty->unit_x2;
	blit->body.srcRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
			 struct drm_file *file_priv,
			 struct vmw_framebuffer *vfb,
			 struct drm_vmw_fence_rep __user *user_fence_rep,
			 struct drm_vmw_rect *vclips,
			 uint32_t num_clips)
{
	struct vmw_dma_buffer *buf =
		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
	dirty.clip = vmw_sou_readback_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
				   0, 0, num_clips, 1, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}