// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

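/*
 * The cursor image (width * height 32-bit pixels) is copied into FIFO
 * memory immediately after this command structure, which is why the
 * reservation below is sized as sizeof(*cmd) + image_size.
 */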
struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so
	 * we treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);
	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}

/**
 * vmw_cursor_update_mob - Update the cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor-image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}

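/*
 * A cursor MOB holds an SVGAGBCursorHeader followed directly by the raw
 * cursor image, hence the size computed below.
 */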
static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}

/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	if (vps->surf) {
		if (vps->surf_mapped)
			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		return vps->surf->snooper.image;
	} else if (vps->bo) {
		return vmw_bo_map_and_cache(vps->bo);
	}
	return NULL;
}

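/*
 * Compare the incoming cursor state against the current one: dimensions,
 * hotspot and, as a last resort, the actual pixel contents. Used to skip
 * redundant cursor updates in atomic_update.
 */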
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image && old_image != new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}

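/*
 * Acquire a MOB large enough for the current cursor: reuse the one already
 * attached to the plane state if it fits, otherwise pull one from the
 * per-plane cache, and only as a last resort allocate and fence a new one.
 */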
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);
	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob. */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (ret != 0) {
		ttm_bo_unreserve(&vps->cursor.bo->tbo);
		goto teardown;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}

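/*
 * Program the cursor position through whichever interface the device
 * exposes: the CURSOR4 register set when SVGA_CAP2_EXTRA_REGS is present,
 * the FIFO cursor-bypass-3 fields, or the legacy cursor registers.
 */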
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

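/*
 * Snoop DMA transfers to the cursor surface so the driver keeps its own
 * copy of the cursor image; vmw_kms_cursor_post_execbuf() re-emits the
 * cursor from this snooped copy after command submission.
 */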
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned int box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Copy row by row, since the guest pitch may differ from
		 * the snoop image pitch. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}

void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them. */
}

/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}

/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}

/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;
	void *virtual;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	virtual = vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	if (unlikely(!virtual))
		return -ENOMEM;

	return 0;
}

/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}

/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	if (vps->surf_mapped) {
		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
		vps->surf_mapped = false;
	}

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	int ret = 0;

	if (vps->surf) {
		if (vps->surf_mapped) {
			vmw_bo_unmap(vps->surf->res.guest_memory_bo);
			vps->surf_mapped = false;
		}
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

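	/*
	 * Take a reference on whichever backing the new fb uses: the buffer
	 * object for bo-backed framebuffers, the surface otherwise.
	 */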
	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	if (!vps->surf && vps->bo) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using vmw_bo_map_and_cache() helper here as we need to
		 * reserve the ttm_buffer_object first which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

		ttm_bo_unreserve(&vps->bo->tbo);

		if (unlikely(ret != 0))
			return -ENOMEM;
	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
		WARN_ON(vps->surf->snooper.image);
		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
				     NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;
		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
		vps->surf_mapped = true;
	}

	if (vps->surf || vps->bo) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}

void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (new_state->fb) {
		hotspot_x += new_state->fb->hot_x;
		hotspot_y += new_state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (!vps->surf && !vps->bo) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (vps->surf)
		du->cursor_age = du->cursor_surface->snooper.age;

	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;

		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);

		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

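	/*
	 * Remember the fb-relative part of the hotspot so that snooped
	 * legacy cursor updates can re-apply it on top of du->hotspot_x/y.
	 */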
	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}

/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}

/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}

int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/*
	 * This is fine in general, but broken userspace might expect
	 * some actual rendering, so give a clue as to why it's blank.
	 */
	if (new_state->enable && !has_primary)
		drm_dbg_driver(&vmw->drm,
			       "CRTC without a primary plane will be blank.\n");

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}

void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}

void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}

/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}

/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}

/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}

/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);
		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}

/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
			vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer.  This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_bo *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_user_bo_unref(&res->guest_memory_bo);
	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}

static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}

/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be less than the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_bo *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have. */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_bo *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			dev_priv->texture_max_width,
			dev_priv->texture_max_height);
		/* Fail with an error rather than returning a NULL fb. */
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref; so does the new framebuffer. */
	if (bo)
		vmw_user_bo_unref(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, vram size is
	 * the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
1758 
1759 /**
1760  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1761  * crtc mutex
1762  * @state: The atomic state pointer containing the new atomic state
1763  * @crtc: The crtc
1764  *
1765  * This function returns the new crtc state if it's part of the state update.
1766  * Otherwise returns the current crtc state. It also makes sure that the
1767  * crtc mutex is locked.
1768  *
1769  * Returns: A valid crtc state pointer or NULL. It may also return a
1770  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1771  */
1772 static struct drm_crtc_state *
1773 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1774 {
1775 	struct drm_crtc_state *crtc_state;
1776 
1777 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1778 	if (crtc_state) {
1779 		lockdep_assert_held(&crtc->mutex.mutex.base);
1780 	} else {
1781 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1782 
1783 		if (ret != 0 && ret != -EALREADY)
1784 			return ERR_PTR(ret);
1785 
1786 		crtc_state = crtc->state;
1787 	}
1788 
1789 	return crtc_state;
1790 }
1791 
1792 /**
1793  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1794  * from the same fb after the new state is committed.
1795  * @dev: The drm_device.
1796  * @state: The new state to be checked.
1797  *
1798  * Returns:
1799  *   Zero on success,
1800  *   -EINVAL on invalid state,
1801  *   -EDEADLK if modeset locking needs to be rerun.
1802  */
1803 static int vmw_kms_check_implicit(struct drm_device *dev,
1804 				  struct drm_atomic_state *state)
1805 {
1806 	struct drm_framebuffer *implicit_fb = NULL;
1807 	struct drm_crtc *crtc;
1808 	struct drm_crtc_state *crtc_state;
1809 	struct drm_plane_state *plane_state;
1810 
1811 	drm_for_each_crtc(crtc, dev) {
1812 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1813 
1814 		if (!du->is_implicit)
1815 			continue;
1816 
1817 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1818 		if (IS_ERR(crtc_state))
1819 			return PTR_ERR(crtc_state);
1820 
1821 		if (!crtc_state || !crtc_state->enable)
1822 			continue;
1823 
1824 		/*
1825 		 * Can't move primary planes across crtcs, so this is OK.
1826 		 * It also means we don't need to take the plane mutex.
1827 		 */
1828 		plane_state = du->primary.state;
1829 		if (plane_state->crtc != crtc)
1830 			continue;
1831 
1832 		if (!implicit_fb)
1833 			implicit_fb = plane_state->fb;
1834 		else if (implicit_fb != plane_state->fb)
1835 			return -EINVAL;
1836 	}
1837 
1838 	return 0;
1839 }
1840 
1841 /**
1842  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1843  * @dev: DRM device
1844  * @state: the driver state object
1845  *
1846  * Returns:
1847  * 0 on success otherwise negative error code
1848  */
1849 static int vmw_kms_check_topology(struct drm_device *dev,
1850 				  struct drm_atomic_state *state)
1851 {
1852 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1853 	struct drm_rect *rects;
1854 	struct drm_crtc *crtc;
1855 	uint32_t i;
1856 	int ret = 0;
1857 
1858 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1859 			GFP_KERNEL);
1860 	if (!rects)
1861 		return -ENOMEM;
1862 
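	/*
	 * Build the proposed topology: an enabled crtc contributes a rect at
	 * its GUI position sized by its new mode; a disabled crtc contributes
	 * an empty rect.
	 */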
1863 	drm_for_each_crtc(crtc, dev) {
1864 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1865 		struct drm_crtc_state *crtc_state;
1866 
1867 		i = drm_crtc_index(crtc);
1868 
1869 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1870 		if (IS_ERR(crtc_state)) {
1871 			ret = PTR_ERR(crtc_state);
1872 			goto clean;
1873 		}
1874 
1875 		if (!crtc_state)
1876 			continue;
1877 
1878 		if (crtc_state->enable) {
1879 			rects[i].x1 = du->gui_x;
1880 			rects[i].y1 = du->gui_y;
1881 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1882 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1883 		} else {
1884 			rects[i].x1 = 0;
1885 			rects[i].y1 = 0;
1886 			rects[i].x2 = 0;
1887 			rects[i].y2 = 0;
1888 		}
1889 	}
1890 
1891 	/* Determine change to topology due to new atomic state */
1892 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1893 				      new_crtc_state, i) {
1894 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1895 		struct drm_connector *connector;
1896 		struct drm_connector_state *conn_state;
1897 		struct vmw_connector_state *vmw_conn_state;
1898 
1899 		if (!du->pref_active && new_crtc_state->enable) {
1900 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1901 			ret = -EINVAL;
1902 			goto clean;
1903 		}
1904 
1905 		/*
1906 		 * For vmwgfx each crtc has only one connector attached and it
1907 		 * does not change, so there is no need to check
1908 		 * crtc->connector_mask and iterate over it.
1909 		 */
1910 		connector = &du->connector;
1911 		conn_state = drm_atomic_get_connector_state(state, connector);
1912 		if (IS_ERR(conn_state)) {
1913 			ret = PTR_ERR(conn_state);
1914 			goto clean;
1915 		}
1916 
1917 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1918 		vmw_conn_state->gui_x = du->gui_x;
1919 		vmw_conn_state->gui_y = du->gui_y;
1920 	}
1921 
1922 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1923 					   rects);
1924 
1925 clean:
1926 	kfree(rects);
1927 	return ret;
1928 }
1929 
1930 /**
1931  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1932  *
1933  * @dev: DRM device
1934  * @state: the driver state object
1935  *
1936  * This is a wrapper around drm_atomic_helper_check() that additionally
1937  * validates vmwgfx-specific state: implicit framebuffer sharing and, when
1938  * a modeset is requested, the resulting display topology.
1939  *
1940  * Returns:
1941  * Zero for success or -errno
1942  */
1943 static int
1944 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1945 			     struct drm_atomic_state *state)
1946 {
1947 	struct drm_crtc *crtc;
1948 	struct drm_crtc_state *crtc_state;
1949 	bool need_modeset = false;
1950 	int i, ret;
1951 
1952 	ret = drm_atomic_helper_check(dev, state);
1953 	if (ret)
1954 		return ret;
1955 
1956 	ret = vmw_kms_check_implicit(dev, state);
1957 	if (ret) {
1958 		VMW_DEBUG_KMS("Invalid implicit state\n");
1959 		return ret;
1960 	}
1961 
1962 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1963 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1964 			need_modeset = true;
1965 	}
1966 
1967 	if (need_modeset)
1968 		return vmw_kms_check_topology(dev, state);
1969 
1970 	return ret;
1971 }
1972 
1973 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1974 	.fb_create = vmw_kms_fb_create,
1975 	.atomic_check = vmw_kms_atomic_check_modeset,
1976 	.atomic_commit = drm_atomic_helper_commit,
1977 };
1978 
1979 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1980 				   struct drm_file *file_priv,
1981 				   struct vmw_framebuffer *vfb,
1982 				   struct vmw_surface *surface,
1983 				   uint32_t sid,
1984 				   int32_t destX, int32_t destY,
1985 				   struct drm_vmw_rect *clips,
1986 				   uint32_t num_clips)
1987 {
1988 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1989 					    &surface->res, destX, destY,
1990 					    num_clips, 1, NULL, NULL);
1991 }
1992 
1993 
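/**
 * vmw_kms_present - Present parts of @vfb on screen, using the method
 * appropriate for the active display unit (summary derived from the
 * dispatch below)
 * @dev_priv: Pointer to the device private struct.
 * @file_priv: Pointer to the drm file identifying the caller.
 * @vfb: Pointer to the framebuffer to present from.
 * @surface: Pointer to the surface backing the framebuffer.
 * @sid: Surface id.
 * @destX: X coordinate offset of the destination.
 * @destY: Y coordinate offset of the destination.
 * @clips: Array of clip rects.
 * @num_clips: Number of clip rects in @clips.
 *
 * Returns: Zero on success, negative errno on failure; -ENOSYS if called
 * with an invalid display unit type. On success the command stream is
 * flushed.
 */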
1994 int vmw_kms_present(struct vmw_private *dev_priv,
1995 		    struct drm_file *file_priv,
1996 		    struct vmw_framebuffer *vfb,
1997 		    struct vmw_surface *surface,
1998 		    uint32_t sid,
1999 		    int32_t destX, int32_t destY,
2000 		    struct drm_vmw_rect *clips,
2001 		    uint32_t num_clips)
2002 {
2003 	int ret;
2004 
2005 	switch (dev_priv->active_display_unit) {
2006 	case vmw_du_screen_target:
2007 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2008 						 &surface->res, destX, destY,
2009 						 num_clips, 1, NULL, NULL);
2010 		break;
2011 	case vmw_du_screen_object:
2012 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2013 					      sid, destX, destY, clips,
2014 					      num_clips);
2015 		break;
2016 	default:
2017 		WARN_ONCE(true,
2018 			  "Present called with invalid display system.\n");
2019 		ret = -ENOSYS;
2020 		break;
2021 	}
2022 	if (ret)
2023 		return ret;
2024 
2025 	vmw_cmd_flush(dev_priv, false);
2026 
2027 	return 0;
2028 }
2029 
2030 static void
2031 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2032 {
2033 	if (dev_priv->hotplug_mode_update_property)
2034 		return;
2035 
2036 	dev_priv->hotplug_mode_update_property =
2037 		drm_property_create_range(&dev_priv->drm,
2038 					  DRM_MODE_PROP_IMMUTABLE,
2039 					  "hotplug_mode_update", 0, 1);
2040 }
2041 
2042 int vmw_kms_init(struct vmw_private *dev_priv)
2043 {
2044 	struct drm_device *dev = &dev_priv->drm;
2045 	int ret;
2046 	static const char *display_unit_names[] = {
2047 		"Invalid",
2048 		"Legacy",
2049 		"Screen Object",
2050 		"Screen Target",
2051 		"Invalid (max)"
2052 	};
2053 
2054 	drm_mode_config_init(dev);
2055 	dev->mode_config.funcs = &vmw_kms_funcs;
2056 	dev->mode_config.min_width = 1;
2057 	dev->mode_config.min_height = 1;
2058 	dev->mode_config.max_width = dev_priv->texture_max_width;
2059 	dev->mode_config.max_height = dev_priv->texture_max_height;
2060 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2061 
2062 	drm_mode_create_suggested_offset_properties(dev);
2063 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2064 
2065 	ret = vmw_kms_stdu_init_display(dev_priv);
2066 	if (ret) {
2067 		ret = vmw_kms_sou_init_display(dev_priv);
2068 		if (ret) /* Fallback */
2069 			ret = vmw_kms_ldu_init_display(dev_priv);
2070 	}
2071 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2072 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2073 		 display_unit_names[dev_priv->active_display_unit]);
2074 
2075 	return ret;
2076 }
2077 
2078 int vmw_kms_close(struct vmw_private *dev_priv)
2079 {
2080 	int ret = 0;
2081 
2082 	/*
2083 	 * Docs say we should take the lock before calling this function,
2084 	 * but since it destroys encoders and our destructor calls
2085 	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
2086 	 */
2087 	drm_mode_config_cleanup(&dev_priv->drm);
2088 	if (dev_priv->active_display_unit == vmw_du_legacy)
2089 		ret = vmw_kms_ldu_close_display(dev_priv);
2090 
2091 	return ret;
2092 }
2093 
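/**
 * vmw_kms_cursor_bypass_ioctl - Handler for the DRM_VMW_CURSOR_BYPASS ioctl
 * @dev: drm device for the ioctl
 * @data: Pointer to a struct drm_vmw_cursor_bypass_arg
 * @file_priv: drm file for the ioctl call
 *
 * Sets the cursor hotspot of the crtc identified by the crtc_id field of
 * @data, or of all crtcs when DRM_VMW_CURSOR_BYPASS_ALL is set in its flags.
 */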
2094 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2095 				struct drm_file *file_priv)
2096 {
2097 	struct drm_vmw_cursor_bypass_arg *arg = data;
2098 	struct vmw_display_unit *du;
2099 	struct drm_crtc *crtc;
2100 	int ret = 0;
2101 
2102 	mutex_lock(&dev->mode_config.mutex);
2103 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2104 
2105 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2106 			du = vmw_crtc_to_du(crtc);
2107 			du->hotspot_x = arg->xhot;
2108 			du->hotspot_y = arg->yhot;
2109 		}
2110 
2111 		mutex_unlock(&dev->mode_config.mutex);
2112 		return 0;
2113 	}
2114 
2115 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2116 	if (!crtc) {
2117 		ret = -ENOENT;
2118 		goto out;
2119 	}
2120 
2121 	du = vmw_crtc_to_du(crtc);
2122 
2123 	du->hotspot_x = arg->xhot;
2124 	du->hotspot_y = arg->yhot;
2125 
2126 out:
2127 	mutex_unlock(&dev->mode_config.mutex);
2128 
2129 	return ret;
2130 }
2131 
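/**
 * vmw_kms_write_svga - Program basic svga mode registers (summary derived
 * from the code below)
 * @vmw_priv: device private
 * @width: Width of the mode
 * @height: Height of the mode
 * @pitch: Pitch, written to the pitchlock register or the fifo pitchlock
 * @bpp: Bits per pixel, written only when 8-bit emulation is supported
 * @depth: Expected depth; a mismatch with the host's SVGA_REG_DEPTH
 * returns -EINVAL
 */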
2132 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2133 			unsigned width, unsigned height, unsigned pitch,
2134 			unsigned bpp, unsigned depth)
2135 {
2136 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2137 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2138 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2139 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2140 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2141 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2142 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2143 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2144 
2145 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2146 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2147 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2148 		return -EINVAL;
2149 	}
2150 
2151 	return 0;
2152 }
2153 
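/* Check whether a mode with the given pitch and height fits within vram. */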
2154 static
2155 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2156 				u64 pitch,
2157 				u64 height)
2158 {
2159 	return (pitch * height) < (u64)dev_priv->vram_size;
2160 }
2161 
2162 /**
2163  * vmw_du_update_layout - Update the display unit with topology from resolution
2164  * plugin and generate DRM uevent
2165  * @dev_priv: device private
2166  * @num_rects: number of drm_rect in rects
2167  * @rects: topology to update
2168  */
2169 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2170 				unsigned int num_rects, struct drm_rect *rects)
2171 {
2172 	struct drm_device *dev = &dev_priv->drm;
2173 	struct vmw_display_unit *du;
2174 	struct drm_connector *con;
2175 	struct drm_connector_list_iter conn_iter;
2176 	struct drm_modeset_acquire_ctx ctx;
2177 	struct drm_crtc *crtc;
2178 	int ret;
2179 
2180 	/* Currently gui_x/y are protected by the crtc mutex */
2181 	mutex_lock(&dev->mode_config.mutex);
2182 	drm_modeset_acquire_init(&ctx, 0);
2183 retry:
2184 	drm_for_each_crtc(crtc, dev) {
2185 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2186 		if (ret < 0) {
2187 			if (ret == -EDEADLK) {
2188 				drm_modeset_backoff(&ctx);
2189 				goto retry;
2190 			}
2191 			goto out_fini;
2192 		}
2193 	}
2194 
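	/*
	 * Units covered by a rect in @rects become active with that rect as
	 * their preferred size and GUI position; the remaining units fall
	 * back to the minimal initial size and are marked inactive.
	 */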
2195 	drm_connector_list_iter_begin(dev, &conn_iter);
2196 	drm_for_each_connector_iter(con, &conn_iter) {
2197 		du = vmw_connector_to_du(con);
2198 		if (num_rects > du->unit) {
2199 			du->pref_width = drm_rect_width(&rects[du->unit]);
2200 			du->pref_height = drm_rect_height(&rects[du->unit]);
2201 			du->pref_active = true;
2202 			du->gui_x = rects[du->unit].x1;
2203 			du->gui_y = rects[du->unit].y1;
2204 		} else {
2205 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2206 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2207 			du->pref_active = false;
2208 			du->gui_x = 0;
2209 			du->gui_y = 0;
2210 		}
2211 	}
2212 	drm_connector_list_iter_end(&conn_iter);
2213 
2214 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2215 		du = vmw_connector_to_du(con);
2216 		if (num_rects > du->unit) {
2217 			drm_object_property_set_value
2218 			  (&con->base, dev->mode_config.suggested_x_property,
2219 			   du->gui_x);
2220 			drm_object_property_set_value
2221 			  (&con->base, dev->mode_config.suggested_y_property,
2222 			   du->gui_y);
2223 		} else {
2224 			drm_object_property_set_value
2225 			  (&con->base, dev->mode_config.suggested_x_property,
2226 			   0);
2227 			drm_object_property_set_value
2228 			  (&con->base, dev->mode_config.suggested_y_property,
2229 			   0);
2230 		}
2231 		con->status = vmw_du_connector_detect(con, true);
2232 	}
2233 out_fini:
2234 	drm_modeset_drop_locks(&ctx);
2235 	drm_modeset_acquire_fini(&ctx);
2236 	mutex_unlock(&dev->mode_config.mutex);
2237 
2238 	drm_sysfs_hotplug_event(dev);
2239 
2240 	return 0;
2241 }
2242 
2243 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2244 			  u16 *r, u16 *g, u16 *b,
2245 			  uint32_t size,
2246 			  struct drm_modeset_acquire_ctx *ctx)
2247 {
2248 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2249 	int i;
2250 
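	/*
	 * Each palette entry occupies three registers (R, G, B); DRM's 16-bit
	 * per-channel gamma values are truncated to 8 bits here.
	 */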
2251 	for (i = 0; i < size; i++) {
2252 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2253 			  r[i], g[i], b[i]);
2254 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2255 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2256 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2257 	}
2258 
2259 	return 0;
2260 }
2261 
2262 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2263 {
2264 	return 0;
2265 }
2266 
2267 enum drm_connector_status
2268 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2269 {
2270 	uint32_t num_displays;
2271 	struct drm_device *dev = connector->dev;
2272 	struct vmw_private *dev_priv = vmw_priv(dev);
2273 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2274 
2275 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2276 
2277 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2278 		 du->pref_active) ?
2279 		connector_status_connected : connector_status_disconnected);
2280 }
2281 
2282 /**
2283  * vmw_guess_mode_timing - Provide fake timings for a
2284  * 60Hz vrefresh mode.
2285  *
2286  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2287  * members filled in.
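 *
 * Worked example (illustrative arithmetic only): for a 1024x768 mode this
 * yields htotal = 1174 and vtotal = 918, so clock = 1174 * 918 / 100 * 6 =
 * 64662 kHz, giving roughly a 60Hz refresh rate.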
2288  */
2289 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2290 {
2291 	mode->hsync_start = mode->hdisplay + 50;
2292 	mode->hsync_end = mode->hsync_start + 50;
2293 	mode->htotal = mode->hsync_end + 50;
2294 
2295 	mode->vsync_start = mode->vdisplay + 50;
2296 	mode->vsync_end = mode->vsync_start + 50;
2297 	mode->vtotal = mode->vsync_end + 50;
2298 
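	/* kHz for a ~60Hz refresh: htotal * vtotal * 60 / 1000, rearranged. */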
2299 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2300 }
2301 
2302 
2303 /**
2304  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2305  * @dev: drm device for the ioctl
2306  * @data: data pointer for the ioctl
2307  * @file_priv: drm file for the ioctl call
2308  *
2309  * Update preferred topology of display unit as per ioctl request. The topology
2310  * is expressed as array of drm_vmw_rect.
2311  * e.g.
2312  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2313  *
2314  * NOTE:
2315  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2316  * Besides the device limit on topology, x + w and y + h (lower right)
2317  * cannot be greater than INT_MAX, so a topology beyond these limits will
2318  * return an error.
2318  *
2319  * Returns:
2320  * Zero on success, negative errno on failure.
2321  */
2322 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2323 				struct drm_file *file_priv)
2324 {
2325 	struct vmw_private *dev_priv = vmw_priv(dev);
2326 	struct drm_mode_config *mode_config = &dev->mode_config;
2327 	struct drm_vmw_update_layout_arg *arg =
2328 		(struct drm_vmw_update_layout_arg *)data;
2329 	void __user *user_rects;
2330 	struct drm_vmw_rect *rects;
2331 	struct drm_rect *drm_rects;
2332 	unsigned rects_size;
2333 	int ret, i;
2334 
2335 	if (!arg->num_outputs) {
2336 		struct drm_rect def_rect = {0, 0,
2337 					    VMWGFX_MIN_INITIAL_WIDTH,
2338 					    VMWGFX_MIN_INITIAL_HEIGHT};
2339 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2340 		return 0;
2341 	}
2342 
2343 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2344 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2345 			GFP_KERNEL);
2346 	if (unlikely(!rects))
2347 		return -ENOMEM;
2348 
2349 	user_rects = (void __user *)(unsigned long)arg->rects;
2350 	ret = copy_from_user(rects, user_rects, rects_size);
2351 	if (unlikely(ret != 0)) {
2352 		DRM_ERROR("Failed to get rects.\n");
2353 		ret = -EFAULT;
2354 		goto out_free;
2355 	}
2356 
2357 	drm_rects = (struct drm_rect *)rects;
2358 
2359 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2360 	for (i = 0; i < arg->num_outputs; i++) {
2361 		struct drm_vmw_rect curr_rect;
2362 
2363 		/* Verify user-space input for overflow, as the kernel uses drm_rect */
2364 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2365 		    (rects[i].y + rects[i].h > INT_MAX)) {
2366 			ret = -ERANGE;
2367 			goto out_free;
2368 		}
2369 
2370 		curr_rect = rects[i];
2371 		drm_rects[i].x1 = curr_rect.x;
2372 		drm_rects[i].y1 = curr_rect.y;
2373 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2374 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2375 
2376 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2377 			      drm_rects[i].x1, drm_rects[i].y1,
2378 			      drm_rects[i].x2, drm_rects[i].y2);
2379 
2380 		/*
2381 		 * Currently this check limits the topology to
2382 		 * mode_config->max (which is actually the maximum texture
2383 		 * size supported by the virtual device). This limit is here
2384 		 * to accommodate window managers that create one big
2385 		 * framebuffer for the whole topology.
2386 		 */
2387 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2388 		    drm_rects[i].x2 > mode_config->max_width ||
2389 		    drm_rects[i].y2 > mode_config->max_height) {
2390 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2391 				      drm_rects[i].x1, drm_rects[i].y1,
2392 				      drm_rects[i].x2, drm_rects[i].y2);
2393 			ret = -EINVAL;
2394 			goto out_free;
2395 		}
2396 	}
2397 
2398 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2399 
2400 	if (ret == 0)
2401 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2402 
2403 out_free:
2404 	kfree(rects);
2405 	return ret;
2406 }
2407 
2408 /**
2409  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2410  * on a set of cliprects and a set of display units.
2411  *
2412  * @dev_priv: Pointer to a device private structure.
2413  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2414  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2415  * Cliprects are given in framebuffer coordinates.
2416  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2417  * be NULL. Cliprects are given in source coordinates.
2418  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2419  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2420  * @num_clips: Number of cliprects in the @clips or @vclips array.
2421  * @increment: Integer with which to increment the clip counter when looping.
2422  * Used to skip a predetermined number of clip rects.
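 * E.g. an @increment of 2 uses only every other rect in the input array.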
2423  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2424  */
2425 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2426 			 struct vmw_framebuffer *framebuffer,
2427 			 const struct drm_clip_rect *clips,
2428 			 const struct drm_vmw_rect *vclips,
2429 			 s32 dest_x, s32 dest_y,
2430 			 int num_clips,
2431 			 int increment,
2432 			 struct vmw_kms_dirty *dirty)
2433 {
2434 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2435 	struct drm_crtc *crtc;
2436 	u32 num_units = 0;
2437 	u32 i, k;
2438 
2439 	dirty->dev_priv = dev_priv;
2440 
2441 	/* If crtc is passed, no need to iterate over other display units */
2442 	if (dirty->crtc) {
2443 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2444 	} else {
2445 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2446 				    head) {
2447 			struct drm_plane *plane = crtc->primary;
2448 
2449 			if (plane->state->fb == &framebuffer->base)
2450 				units[num_units++] = vmw_crtc_to_du(crtc);
2451 		}
2452 	}
2453 
2454 	for (k = 0; k < num_units; k++) {
2455 		struct vmw_display_unit *unit = units[k];
2456 		s32 crtc_x = unit->crtc.x;
2457 		s32 crtc_y = unit->crtc.y;
2458 		s32 crtc_width = unit->crtc.mode.hdisplay;
2459 		s32 crtc_height = unit->crtc.mode.vdisplay;
2460 		const struct drm_clip_rect *clips_ptr = clips;
2461 		const struct drm_vmw_rect *vclips_ptr = vclips;
2462 
2463 		dirty->unit = unit;
2464 		if (dirty->fifo_reserve_size > 0) {
2465 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2466 						      dirty->fifo_reserve_size);
2467 			if (!dirty->cmd)
2468 				return -ENOMEM;
2469 
2470 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2471 		}
2472 		dirty->num_hits = 0;
2473 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2474 		       vclips_ptr += increment) {
2475 			s32 clip_left;
2476 			s32 clip_top;
2477 
2478 			/*
2479 			 * Select clip array type. Note that integer type
2480 			 * in @clips is unsigned short, whereas in @vclips
2481 			 * it's 32-bit.
2482 			 */
2483 			if (clips) {
2484 				dirty->fb_x = (s32) clips_ptr->x1;
2485 				dirty->fb_y = (s32) clips_ptr->y1;
2486 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2487 					crtc_x;
2488 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2489 					crtc_y;
2490 			} else {
2491 				dirty->fb_x = vclips_ptr->x;
2492 				dirty->fb_y = vclips_ptr->y;
2493 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2494 					dest_x - crtc_x;
2495 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2496 					dest_y - crtc_y;
2497 			}
2498 
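			/*
			 * Translate from framebuffer space to display unit
			 * space using the destination offset and the crtc
			 * origin.
			 */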
2499 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2500 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2501 
2502 			/* Skip this clip if it's outside the crtc region */
2503 			if (dirty->unit_x1 >= crtc_width ||
2504 			    dirty->unit_y1 >= crtc_height ||
2505 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2506 				continue;
2507 
2508 			/* Clip right and bottom to crtc limits */
2509 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2510 					       crtc_width);
2511 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2512 					       crtc_height);
2513 
2514 			/* Clip left and top to crtc limits */
2515 			clip_left = min_t(s32, dirty->unit_x1, 0);
2516 			clip_top = min_t(s32, dirty->unit_y1, 0);
2517 			dirty->unit_x1 -= clip_left;
2518 			dirty->unit_y1 -= clip_top;
2519 			dirty->fb_x -= clip_left;
2520 			dirty->fb_y -= clip_top;
2521 
2522 			dirty->clip(dirty);
2523 		}
2524 
2525 		dirty->fifo_commit(dirty);
2526 	}
2527 
2528 	return 0;
2529 }
2530 
2531 /**
2532  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2533  * cleanup and fencing
2534  * @dev_priv: Pointer to the device-private struct
2535  * @file_priv: Pointer identifying the client when user-space fencing is used
2536  * @ctx: Pointer to the validation context
2537  * @out_fence: If non-NULL, returned refcounted fence-pointer
2538  * @user_fence_rep: If non-NULL, pointer to user-space address area
2539  * in which to copy user-space fence info
2540  */
2541 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2542 				      struct drm_file *file_priv,
2543 				      struct vmw_validation_context *ctx,
2544 				      struct vmw_fence_obj **out_fence,
2545 				      struct drm_vmw_fence_rep __user *
2546 				      user_fence_rep)
2547 {
2548 	struct vmw_fence_obj *fence = NULL;
2549 	uint32_t handle = 0;
2550 	int ret = 0;
2551 
2552 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2553 	    out_fence)
2554 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2555 						 file_priv ? &handle : NULL);
2556 	vmw_validation_done(ctx, fence);
2557 	if (file_priv)
2558 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2559 					    ret, user_fence_rep, fence,
2560 					    handle, -1);
2561 	if (out_fence)
2562 		*out_fence = fence;
2563 	else
2564 		vmw_fence_obj_unreference(&fence);
2565 }
2566 
2567 /**
2568  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2569  * its backing MOB.
2570  *
2571  * @res: Pointer to the surface resource
2572  * @clips: Clip rects in framebuffer (surface) space.
2573  * @num_clips: Number of clips in @clips.
2574  * @increment: Integer with which to increment the clip counter when looping.
2575  * Used to skip a predetermined number of clip rects.
2576  *
2577  * This function makes sure the proxy surface is updated from its backing MOB
2578  * using the region given by @clips. The surface resource @res and its backing
2579  * MOB need to be reserved and validated on call.
2580  */
2581 int vmw_kms_update_proxy(struct vmw_resource *res,
2582 			 const struct drm_clip_rect *clips,
2583 			 unsigned num_clips,
2584 			 int increment)
2585 {
2586 	struct vmw_private *dev_priv = res->dev_priv;
2587 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2588 	struct {
2589 		SVGA3dCmdHeader header;
2590 		SVGA3dCmdUpdateGBImage body;
2591 	} *cmd;
2592 	SVGA3dBox *box;
2593 	size_t copy_size = 0;
2594 	int i;
2595 
2596 	if (!clips)
2597 		return 0;
2598 
2599 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2600 	if (!cmd)
2601 		return -ENOMEM;
2602 
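	/*
	 * Emit one SVGA_3D_CMD_UPDATE_GB_IMAGE per clip; each box is the clip
	 * rect in surface space with depth 1.
	 */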
2603 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2604 		box = &cmd->body.box;
2605 
2606 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2607 		cmd->header.size = sizeof(cmd->body);
2608 		cmd->body.image.sid = res->id;
2609 		cmd->body.image.face = 0;
2610 		cmd->body.image.mipmap = 0;
2611 
2612 		if (clips->x1 > size->width || clips->x2 > size->width ||
2613 		    clips->y1 > size->height || clips->y2 > size->height) {
2614 			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2615 			return -EINVAL;
2616 		}
2617 
2618 		box->x = clips->x1;
2619 		box->y = clips->y1;
2620 		box->z = 0;
2621 		box->w = clips->x2 - clips->x1;
2622 		box->h = clips->y2 - clips->y1;
2623 		box->d = 1;
2624 
2625 		copy_size += sizeof(*cmd);
2626 	}
2627 
2628 	vmw_cmd_commit(dev_priv, copy_size);
2629 
2630 	return 0;
2631 }
2632 
2633 /**
2634  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2635  * property.
2636  *
2637  * @dev_priv: Pointer to a device private struct.
2638  *
2639  * Sets up the implicit placement property unless it's already set up.
2640  */
2641 void
2642 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2643 {
2644 	if (dev_priv->implicit_placement_property)
2645 		return;
2646 
2647 	dev_priv->implicit_placement_property =
2648 		drm_property_create_range(&dev_priv->drm,
2649 					  DRM_MODE_PROP_IMMUTABLE,
2650 					  "implicit_placement", 0, 1);
2651 }
2652 
2653 /**
2654  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2655  *
2656  * @dev: Pointer to the drm device
2657  * Return: 0 on success. Negative error code on failure.
2658  */
2659 int vmw_kms_suspend(struct drm_device *dev)
2660 {
2661 	struct vmw_private *dev_priv = vmw_priv(dev);
2662 
2663 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2664 	if (IS_ERR(dev_priv->suspend_state)) {
2665 		int ret = PTR_ERR(dev_priv->suspend_state);
2666 
2667 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2668 		dev_priv->suspend_state = NULL;
2669 
2670 		return ret;
2671 	}
2672 
2673 	return 0;
2674 }
2675 
2676 
2677 /**
2678  * vmw_kms_resume - Re-enable modesetting and restore state
2679  *
2680  * @dev: Pointer to the drm device
2681  * Return: 0 on success. Negative error code on failure.
2682  *
2683  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2684  * to call this function without a previous vmw_kms_suspend().
2685  */
2686 int vmw_kms_resume(struct drm_device *dev)
2687 {
2688 	struct vmw_private *dev_priv = vmw_priv(dev);
2689 	int ret;
2690 
2691 	if (WARN_ON(!dev_priv->suspend_state))
2692 		return 0;
2693 
2694 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2695 	dev_priv->suspend_state = NULL;
2696 
2697 	return ret;
2698 }
2699 
2700 /**
2701  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2702  *
2703  * @dev: Pointer to the drm device
2704  */
2705 void vmw_kms_lost_device(struct drm_device *dev)
2706 {
2707 	drm_atomic_helper_shutdown(dev);
2708 }
2709 
2710 /**
2711  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2712  * @update: The closure structure.
2713  *
2714  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2715  * update on display unit.
2716  *
2717  * Return: 0 on success or a negative error code on failure.
2718  */
2719 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2720 {
2721 	struct drm_plane_state *state = update->plane->state;
2722 	struct drm_plane_state *old_state = update->old_state;
2723 	struct drm_atomic_helper_damage_iter iter;
2724 	struct drm_rect clip;
2725 	struct drm_rect bb;
2726 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2727 	uint32_t reserved_size = 0;
2728 	uint32_t submit_size = 0;
2729 	uint32_t curr_size = 0;
2730 	uint32_t num_hits = 0;
2731 	void *cmd_start;
2732 	char *cmd_next;
2733 	int ret;
2734 
2735 	/*
2736 	 * Iterate in advance to check whether a plane update is really
2737 	 * needed, and to find the number of clips that actually lie in the
2738 	 * plane src, for fifo allocation.
2739 	 */
2739 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2740 	drm_atomic_for_each_plane_damage(&iter, &clip)
2741 		num_hits++;
2742 
2743 	if (num_hits == 0)
2744 		return 0;
2745 
2746 	if (update->vfb->bo) {
2747 		struct vmw_framebuffer_bo *vfbbo =
2748 			container_of(update->vfb, typeof(*vfbbo), base);
2749 
2750 		/*
2751 		 * For screen targets we want a mappable bo; for everything
2752 		 * else we want an accelerated, i.e. host backed (vram or gmr),
2753 		 * bo. If the display unit is not a screen target, MOBs
2754 		 * shouldn't be available.
2755 		 */
2755 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2756 			vmw_bo_placement_set(vfbbo->buffer,
2757 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2758 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2759 		} else {
2760 			WARN_ON(update->dev_priv->has_mob);
2761 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2762 		}
2763 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2764 	} else {
2765 		struct vmw_framebuffer_surface *vfbs =
2766 			container_of(update->vfb, typeof(*vfbs), base);
2767 
2768 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2769 						  0, VMW_RES_DIRTY_NONE, NULL,
2770 						  NULL);
2771 	}
2772 
2773 	if (ret)
2774 		return ret;
2775 
2776 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2777 	if (ret)
2778 		goto out_unref;
2779 
2780 	reserved_size = update->calc_fifo_size(update, num_hits);
2781 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2782 	if (!cmd_start) {
2783 		ret = -ENOMEM;
2784 		goto out_revert;
2785 	}
2786 
2787 	cmd_next = cmd_start;
2788 
2789 	if (update->post_prepare) {
2790 		curr_size = update->post_prepare(update, cmd_next);
2791 		cmd_next += curr_size;
2792 		submit_size += curr_size;
2793 	}
2794 
2795 	if (update->pre_clip) {
2796 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2797 		cmd_next += curr_size;
2798 		submit_size += curr_size;
2799 	}
2800 
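	/* Start with an empty (inverted) bounding box. */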
2801 	bb.x1 = INT_MAX;
2802 	bb.y1 = INT_MAX;
2803 	bb.x2 = INT_MIN;
2804 	bb.y2 = INT_MIN;
2805 
2806 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2807 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2808 		uint32_t fb_x = clip.x1;
2809 		uint32_t fb_y = clip.y1;
2810 
2811 		vmw_du_translate_to_crtc(state, &clip);
2812 		if (update->clip) {
2813 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2814 						 fb_y);
2815 			cmd_next += curr_size;
2816 			submit_size += curr_size;
2817 		}
2818 		bb.x1 = min_t(int, bb.x1, clip.x1);
2819 		bb.y1 = min_t(int, bb.y1, clip.y1);
2820 		bb.x2 = max_t(int, bb.x2, clip.x2);
2821 		bb.y2 = max_t(int, bb.y2, clip.y2);
2822 	}
2823 
2824 	curr_size = update->post_clip(update, cmd_next, &bb);
2825 	submit_size += curr_size;
2826 
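	/*
	 * Defensive: if the clip callbacks produced more data than was
	 * reserved, commit nothing rather than a truncated stream.
	 */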
2827 	if (reserved_size < submit_size)
2828 		submit_size = 0;
2829 
2830 	vmw_cmd_commit(update->dev_priv, submit_size);
2831 
2832 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2833 					 update->out_fence, NULL);
2834 	return ret;
2835 
2836 out_revert:
2837 	vmw_validation_revert(&val_ctx);
2838 
2839 out_unref:
2840 	vmw_validation_unref_lists(&val_ctx);
2841 	return ret;
2842 }
2843 
2844 /**
2845  * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2846  *
2847  * @connector: the drm connector, part of a DU container
2848  * @mode: drm mode to check
2849  *
2850  * Returns MODE_OK on success, or a drm_mode_status error code.
2851  */
2852 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2853 					      struct drm_display_mode *mode)
2854 {
2855 	enum drm_mode_status ret;
2856 	struct drm_device *dev = connector->dev;
2857 	struct vmw_private *dev_priv = vmw_priv(dev);
2858 	u32 assumed_cpp = 4;
2859 
2860 	if (dev_priv->assume_16bpp)
2861 		assumed_cpp = 2;
2862 
2863 	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
2864 				     dev_priv->texture_max_height);
2865 	if (ret != MODE_OK)
2866 		return ret;
2867 
2868 	if (!vmw_kms_validate_mode_vram(dev_priv,
2869 					mode->hdisplay * assumed_cpp,
2870 					mode->vdisplay))
2871 		return MODE_MEM;
2872 
2873 	return MODE_OK;
2874 }
2875 
2876 /**
2877  * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2878  *
2879  * @connector: the drm connector, part of a DU container
2880  *
2881  * Returns the number of added modes.
2882  */
2883 int vmw_connector_get_modes(struct drm_connector *connector)
2884 {
2885 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2886 	struct drm_device *dev = connector->dev;
2887 	struct vmw_private *dev_priv = vmw_priv(dev);
2888 	struct drm_display_mode *mode = NULL;
2889 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2890 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2891 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2892 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2893 	};
2894 	u32 max_width;
2895 	u32 max_height;
2896 	u32 num_modes;
2897 
2898 	/* Add preferred mode */
2899 	mode = drm_mode_duplicate(dev, &prefmode);
2900 	if (!mode)
2901 		return 0;
2902 
2903 	mode->hdisplay = du->pref_width;
2904 	mode->vdisplay = du->pref_height;
2905 	vmw_guess_mode_timing(mode);
2906 	drm_mode_set_name(mode);
2907 
2908 	drm_mode_probed_add(connector, mode);
2909 	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2910 
2911 	/* Probe connector for all modes not exceeding our geom limits */
2912 	max_width  = dev_priv->texture_max_width;
2913 	max_height = dev_priv->texture_max_height;
2914 
2915 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2916 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2917 		max_height = min(dev_priv->stdu_max_height, max_height);
2918 	}
2919 
2920 	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2921 
2922 	return num_modes;
2923 }
2924