1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include <drm/drm_atomic.h>
29 #include <drm/drm_atomic_helper.h>
30 #include <drm/drm_damage_helper.h>
31 #include <drm/drm_fourcc.h>
32 #include <drm/drm_rect.h>
33 #include <drm/drm_sysfs.h>
34 
35 #include "vmwgfx_kms.h"
36 
37 void vmw_du_cleanup(struct vmw_display_unit *du)
38 {
39 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
40 	drm_plane_cleanup(&du->primary);
41 	if (vmw_cmd_supported(dev_priv))
42 		drm_plane_cleanup(&du->cursor.base);
43 
44 	drm_connector_unregister(&du->connector);
45 	drm_crtc_cleanup(&du->crtc);
46 	drm_encoder_cleanup(&du->encoder);
47 	drm_connector_cleanup(&du->connector);
48 }
49 
50 /*
51  * Display Unit Cursor functions
52  */
53 
54 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
55 				  struct ttm_buffer_object *bo,
56 				  struct ttm_bo_kmap_obj *map,
57 				  u32 *image, u32 width, u32 height,
58 				  u32 hotspotX, u32 hotspotY);
59 
60 struct vmw_svga_fifo_cmd_define_cursor {
61 	u32 cmd;
62 	SVGAFifoCmdDefineAlphaCursor cursor;
63 };
64 
65 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
66 				    struct ttm_buffer_object *cm_bo,
67 				    struct ttm_bo_kmap_obj *cm_map,
68 				    u32 *image, u32 width, u32 height,
69 				    u32 hotspotX, u32 hotspotY)
70 {
71 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
72 	const u32 image_size = width * height * sizeof(*image);
73 	const u32 cmd_size = sizeof(*cmd) + image_size;
74 
75 	if (cm_bo != NULL) {
76 		vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image,
77 				      width, height,
78 				      hotspotX, hotspotY);
79 		return;
80 	}
81 
82 	/* Try to reserve fifocmd space and swallow any failures;
83 	   such reservations cannot be left unconsumed for long
84 	   under the risk of clogging other fifocmd users, so
85 	   we treat reservations separately from the way we treat
86 	   other fallible KMS-atomic resources at prepare_fb */
87 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
88 
89 	if (unlikely(cmd == NULL))
90 		return;
91 
92 	memset(cmd, 0, sizeof(*cmd));
93 
94 	memcpy(&cmd[1], image, image_size);
95 
96 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
97 	cmd->cursor.id = 0;
98 	cmd->cursor.width = width;
99 	cmd->cursor.height = height;
100 	cmd->cursor.hotspotX = hotspotX;
101 	cmd->cursor.hotspotY = hotspotY;
102 
103 	vmw_cmd_commit_flush(dev_priv, cmd_size);
104 }
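
/*
 * Layout sketch for the command emitted above (illustrative, derived
 * from the reservation math rather than the device headers): the
 * fixed-size command struct is followed immediately by width * height
 * 32-bit pixels, which is why cmd_size is sizeof(*cmd) + image_size
 * and the image data is copied to &cmd[1].
 *
 *   +---------------------------------+
 *   | u32 cmd = SVGA_CMD_DEFINE_...   |
 *   | SVGAFifoCmdDefineAlphaCursor    |
 *   +---------------------------------+
 *   | u32 image[width * height]       |
 *   +---------------------------------+
 */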
105 
106 /**
107  * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
108  *
109  * @dev_priv: device to work with
110  * @bo: BO for the MOB
111  * @map: kmap obj for the BO
112  * @image: cursor source data to fill the MOB with
113  * @width: source data width
114  * @height: source data height
115  * @hotspotX: cursor hotspot x
116  * @hotspotY: cursor hotspot y
117  */
118 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
119 				  struct ttm_buffer_object *bo,
120 				  struct ttm_bo_kmap_obj *map,
121 				  u32 *image, u32 width, u32 height,
122 				  u32 hotspotX, u32 hotspotY)
123 {
124 	SVGAGBCursorHeader *header;
125 	SVGAGBAlphaCursorHeader *alpha_header;
126 	const u32 image_size = width * height * sizeof(*image);
127 	bool dummy;
128 
129 	BUG_ON(!image);
130 
131 	header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy);
132 	alpha_header = &header->header.alphaHeader;
133 
134 	header->type = SVGA_ALPHA_CURSOR;
135 	header->sizeInBytes = image_size;
136 
137 	alpha_header->hotspotX = hotspotX;
138 	alpha_header->hotspotY = hotspotY;
139 	alpha_header->width = width;
140 	alpha_header->height = height;
141 
142 	memcpy(header + 1, image, image_size);
143 
144 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start);
145 }
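
/*
 * The MOB path mirrors the FIFO layout above: an SVGAGBCursorHeader at
 * the start of the mapped buffer, immediately followed by the pixel
 * data (hence the memcpy to header + 1). The write to
 * SVGA_REG_CURSOR_MOBID is what hands the buffer to the device, so the
 * header and image must be fully populated before that register write.
 */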
146 
147 void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp)
148 {
149 	size_t i;
150 
151 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) {
152 		if (vcp->cursor_mob[i] != NULL) {
153 			ttm_bo_unpin(vcp->cursor_mob[i]);
154 			ttm_bo_put(vcp->cursor_mob[i]);
155 			kfree(vcp->cursor_mob[i]);
156 			vcp->cursor_mob[i] = NULL;
157 		}
158 	}
159 }
160 
161 #define CURSOR_MOB_SIZE(dimension) \
162 	((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader))
163 
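/*
 * For example, the mandatory 64x64 cursor gives CURSOR_MOB_SIZE(64) ==
 * 64 * 64 * sizeof(u32) + sizeof(SVGAGBCursorHeader), i.e. 16 KiB of
 * pixel data plus the header.
 */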
164 int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor)
165 {
166 	struct vmw_private *dev_priv = cursor->base.dev->dev_private;
167 	uint32_t cursor_max_dim, mob_max_size;
168 	int ret = 0;
169 	size_t i;
170 
171 	if (!dev_priv->has_mob || (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
172 		return -ENOSYS;
173 
174 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
175 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
176 
177 	if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size)
178 		cursor_max_dim = 64; /* Mandatorily-supported cursor dimension */
179 
180 	for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) {
181 		struct ttm_buffer_object **const bo = &cursor->cursor_mob[i];
182 
183 		ret = vmw_bo_create_kernel(dev_priv,
184 			CURSOR_MOB_SIZE(cursor_max_dim),
185 			&vmw_mob_placement, bo);
186 
187 		if (ret != 0)
188 			goto teardown;
189 
190 		if ((*bo)->resource->mem_type != VMW_PL_MOB) {
191 			DRM_ERROR("Obtained buffer object is not a MOB.\n");
192 			ret = -ENOSYS;
193 			goto teardown;
194 		}
195 
196 		/* Fence the mob creation so we are guaranteed to have the mob */
197 		ret = ttm_bo_reserve(*bo, false, false, NULL);
198 
199 		if (ret != 0)
200 			goto teardown;
201 
202 		vmw_bo_fence_single(*bo, NULL);
203 
204 		ttm_bo_unreserve(*bo);
205 
206 		drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n",
207 			 (*bo)->resource->start, cursor_max_dim);
208 	}
209 
210 	return 0;
211 
212 teardown:
213 	vmw_du_destroy_cursor_mob_array(cursor);
214 
215 	return ret;
216 }
217 
218 #undef CURSOR_MOB_SIZE
219 
220 static void vmw_cursor_update_bo(struct vmw_private *dev_priv,
221 				 struct ttm_buffer_object *cm_bo,
222 				 struct ttm_bo_kmap_obj *cm_map,
223 				 struct vmw_buffer_object *bo,
224 				 u32 width, u32 height,
225 				 u32 hotspotX, u32 hotspotY)
226 {
227 	void *virtual;
228 	bool dummy;
229 
230 	virtual = ttm_kmap_obj_virtual(&bo->map, &dummy);
231 	if (virtual) {
232 		vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual,
233 					width, height,
234 					hotspotX, hotspotY);
235 		atomic_dec(&bo->base_mapped_count);
236 	}
237 }
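
/*
 * The atomic_dec of base_mapped_count above pairs with the atomic_inc
 * performed when the cursor BO was kmapped in
 * vmw_du_cursor_plane_prepare_fb(); vmw_du_cursor_plane_cleanup_fb()
 * only kunmaps the BO once this count has dropped back to zero.
 */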
238 
239 
240 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
241 				       bool show, int x, int y)
242 {
243 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
244 					     : SVGA_CURSOR_ON_HIDE;
245 	uint32_t count;
246 
247 	spin_lock(&dev_priv->cursor_lock);
248 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
249 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
250 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
251 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
252 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
253 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
254 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
255 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
256 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
257 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
258 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
259 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
260 	} else {
261 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
262 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
263 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
264 	}
265 	spin_unlock(&dev_priv->cursor_lock);
266 }
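
/*
 * Three positioning mechanisms are tried above in order of capability:
 * the CURSOR4 register set (SVGA_CAP2_EXTRA_REGS), the cursor bypass 3
 * FIFO registers, and finally the legacy SVGA_REG_CURSOR_* registers.
 * Only the bypass 3 path needs the explicit CURSOR_COUNT increment to
 * signal the device that a new position has been submitted.
 */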
267 
268 
269 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
270 			  struct ttm_object_file *tfile,
271 			  struct ttm_buffer_object *bo,
272 			  SVGA3dCmdHeader *header)
273 {
274 	struct ttm_bo_kmap_obj map;
275 	unsigned long kmap_offset;
276 	unsigned long kmap_num;
277 	SVGA3dCopyBox *box;
278 	unsigned box_count;
279 	void *virtual;
280 	bool dummy;
281 	struct vmw_dma_cmd {
282 		SVGA3dCmdHeader header;
283 		SVGA3dCmdSurfaceDMA dma;
284 	} *cmd;
285 	int i, ret;
286 
287 	cmd = container_of(header, struct vmw_dma_cmd, header);
288 
289 	/* No snooper installed */
290 	if (!srf->snooper.image)
291 		return;
292 
293 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
294 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
295 		return;
296 	}
297 
298 	if (cmd->header.size < 64) {
299 		DRM_ERROR("at least one full copy box must be given\n");
300 		return;
301 	}
302 
303 	box = (SVGA3dCopyBox *)&cmd[1];
304 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
305 			sizeof(SVGA3dCopyBox);
306 
307 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
308 	    box->x != 0    || box->y != 0    || box->z != 0    ||
309 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
310 	    box->d != 1    || box_count != 1 ||
311 	    box->w > 64 || box->h > 64) {
312 		/* TODO handle non-page-aligned offsets */
313 		/* TODO handle more dst & src != 0 */
314 		/* TODO handle more than one copy */
315 		DRM_ERROR("Can't snoop dma request for cursor!\n");
316 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
317 			  box->srcx, box->srcy, box->srcz,
318 			  box->x, box->y, box->z,
319 			  box->w, box->h, box->d, box_count,
320 			  cmd->dma.guest.ptr.offset);
321 		return;
322 	}
323 
324 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
325 	kmap_num = (64*64*4) >> PAGE_SHIFT;
326 
327 	ret = ttm_bo_reserve(bo, true, false, NULL);
328 	if (unlikely(ret != 0)) {
329 		DRM_ERROR("reserve failed\n");
330 		return;
331 	}
332 
333 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
334 	if (unlikely(ret != 0))
335 		goto err_unreserve;
336 
337 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
338 
339 	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
340 		memcpy(srf->snooper.image, virtual, 64*64*4);
341 	} else {
342 		/* Image is a u32 pointer, so + i * 64 advances one 64-pixel row. */
343 		for (i = 0; i < box->h; i++)
344 			memcpy(srf->snooper.image + i * 64,
345 			       virtual + i * cmd->dma.guest.pitch,
346 			       box->w * 4);
347 	}
348 
349 	srf->snooper.age++;
350 
351 	ttm_bo_kunmap(&map);
352 err_unreserve:
353 	ttm_bo_unreserve(bo);
354 }
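
/*
 * Snooping keeps a CPU-side copy of the most recent 64x64 cursor image
 * that user-space uploaded with a SurfaceDMA command, bumping
 * snooper.age on every update. vmw_kms_cursor_post_execbuf() below
 * compares that age against the per-display-unit cursor_age and
 * re-emits the snooped image when it changed, without touching the
 * guest backing store again.
 */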
355 
356 /**
357  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
358  *
359  * @dev_priv: Pointer to the device private struct.
360  *
361  * Clears all legacy hotspots.
362  */
363 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
364 {
365 	struct drm_device *dev = &dev_priv->drm;
366 	struct vmw_display_unit *du;
367 	struct drm_crtc *crtc;
368 
369 	drm_modeset_lock_all(dev);
370 	drm_for_each_crtc(crtc, dev) {
371 		du = vmw_crtc_to_du(crtc);
372 
373 		du->hotspot_x = 0;
374 		du->hotspot_y = 0;
375 	}
376 	drm_modeset_unlock_all(dev);
377 }
378 
379 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
380 {
381 	struct drm_device *dev = &dev_priv->drm;
382 	struct vmw_display_unit *du;
383 	struct drm_crtc *crtc;
384 
385 	mutex_lock(&dev->mode_config.mutex);
386 
387 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
388 		du = vmw_crtc_to_du(crtc);
389 		if (!du->cursor_surface ||
390 		    du->cursor_age == du->cursor_surface->snooper.age)
391 			continue;
392 
393 		du->cursor_age = du->cursor_surface->snooper.age;
394 		vmw_cursor_update_image(dev_priv, NULL, NULL,
395 					du->cursor_surface->snooper.image,
396 					64, 64,
397 					du->hotspot_x + du->core_hotspot_x,
398 					du->hotspot_y + du->core_hotspot_y);
399 	}
400 
401 	mutex_unlock(&dev->mode_config.mutex);
402 }
403 
404 
405 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
406 {
407 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
408 	vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane));
409 	drm_plane_cleanup(plane);
410 }
411 
412 
413 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
414 {
415 	drm_plane_cleanup(plane);
416 
417 	/* Planes are static in our case so we don't free them */
418 }
419 
420 
421 /**
422  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
423  *
424  * @vps: plane state associated with the display surface
425  * @unreference: true if we also want to unreference the display surface.
426  */
427 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
428 			     bool unreference)
429 {
430 	if (vps->surf) {
431 		if (vps->pinned) {
432 			vmw_resource_unpin(&vps->surf->res);
433 			vps->pinned--;
434 		}
435 
436 		if (unreference) {
437 			if (vps->pinned)
438 				DRM_ERROR("Surface still pinned\n");
439 			vmw_surface_unreference(&vps->surf);
440 		}
441 	}
442 }
443 
444 
445 /**
446  * vmw_du_plane_cleanup_fb - Unpins the plane surface
447  *
448  * @plane:  display plane
449  * @old_state: Contains the FB to clean up
450  *
451  * Unpins the framebuffer surface held by the old plane state.
452  *
453  * This hook returns void; there is no status to propagate.
454  */
455 void
456 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
457 			struct drm_plane_state *old_state)
458 {
459 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
460 
461 	vmw_du_plane_unpin_surf(vps, false);
462 }
463 
464 
465 /**
466  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
467  *
468  * @plane: cursor plane
469  * @old_state: contains the state to clean up
470  *
471  * Unmaps all cursor bo mappings and unpins the cursor surface.
472  *
473  * This hook returns void; there is no status to propagate.
474  */
475 void
476 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
477 			       struct drm_plane_state *old_state)
478 {
479 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
480 	bool dummy;
481 
482 	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
483 		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
484 
485 		if (likely(ret == 0)) {
486 			if (atomic_read(&vps->bo->base_mapped_count) == 0)
487 			    ttm_bo_kunmap(&vps->bo->map);
488 			ttm_bo_unreserve(&vps->bo->base);
489 		}
490 	}
491 
492 	if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) {
493 		const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL);
494 
495 		if (likely(ret == 0)) {
496 			ttm_bo_kunmap(&vps->cm_map);
497 			ttm_bo_unreserve(vps->cm_bo);
498 		}
499 	}
500 
501 	vmw_du_plane_unpin_surf(vps, false);
502 
503 	if (vps->surf) {
504 		vmw_surface_unreference(&vps->surf);
505 		vps->surf = NULL;
506 	}
507 
508 	if (vps->bo) {
509 		vmw_bo_unreference(&vps->bo);
510 		vps->bo = NULL;
511 	}
512 }
513 
514 /**
515  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
516  *
517  * @plane:  display plane
518  * @new_state: info on the new plane state, including the FB
519  *
520  * Returns 0 on success, negative error code on failure
521  */
522 int
523 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
524 			       struct drm_plane_state *new_state)
525 {
526 	struct drm_framebuffer *fb = new_state->fb;
527 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
528 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
529 	struct ttm_buffer_object *cm_bo = NULL;
530 	bool dummy;
531 	int ret = 0;
532 
533 	if (vps->surf) {
534 		vmw_surface_unreference(&vps->surf);
535 		vps->surf = NULL;
536 	}
537 
538 	if (vps->bo) {
539 		vmw_bo_unreference(&vps->bo);
540 		vps->bo = NULL;
541 	}
542 
543 	if (fb) {
544 		if (vmw_framebuffer_to_vfb(fb)->bo) {
545 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
546 			vmw_bo_reference(vps->bo);
547 		} else {
548 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
549 			vmw_surface_reference(vps->surf);
550 		}
551 	}
552 
553 	vps->cm_bo = NULL;
554 
555 	if (vps->surf == NULL && vps->bo != NULL) {
556 		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
557 
558 		/* Not using vmw_bo_map_and_cache() helper here as we need to reserve
559 		   the ttm_buffer_object first, which vmw_bo_map_and_cache() omits. */
560 		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
561 
562 		if (unlikely(ret != 0))
563 			return -ENOMEM;
564 
565 		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
566 
567 		if (likely(ret == 0))
568 			atomic_inc(&vps->bo->base_mapped_count);
569 
570 		ttm_bo_unreserve(&vps->bo->base);
571 
572 		if (unlikely(ret != 0))
573 			return -ENOMEM;
574 	}
575 
576 	if (vps->surf || vps->bo) {
577 		unsigned cursor_mob_idx = vps->cursor_mob_idx;
578 
579 		/* Lazily set up cursor MOBs just once -- no reattempts. */
580 		if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL)
581 			if (vmw_du_create_cursor_mob_array(vcp) != 0)
582 				vps->cursor_mob_idx = cursor_mob_idx = -1U;
583 
584 		if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) {
585 			const u32 size = sizeof(SVGAGBCursorHeader) +
586 				new_state->crtc_w * new_state->crtc_h * sizeof(u32);
587 
588 			cm_bo = vcp->cursor_mob[cursor_mob_idx];
589 
590 			if (cm_bo->resource->num_pages * PAGE_SIZE < size) {
591 				ret = -EINVAL;
592 				goto error_bo_unmap;
593 			}
594 
595 			ret = ttm_bo_reserve(cm_bo, false, false, NULL);
596 
597 			if (unlikely(ret != 0)) {
598 				ret = -ENOMEM;
599 				goto error_bo_unmap;
600 			}
601 
602 			ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map);
603 
604 			/*
605 			 * We just want to try to get mob bind to finish
606 			 * so that the first write to SVGA_REG_CURSOR_MOBID
607 			 * is done with a buffer that the device has already
608 			 * seen
609 			 */
610 			(void) ttm_bo_wait(cm_bo, false, false);
611 
612 			ttm_bo_unreserve(cm_bo);
613 
614 			if (unlikely(ret != 0)) {
615 				ret = -ENOMEM;
616 				goto error_bo_unmap;
617 			}
618 
619 			vps->cursor_mob_idx = cursor_mob_idx ^ 1;
620 			vps->cm_bo = cm_bo;
621 		}
622 	}
623 
624 	return 0;
625 
626 error_bo_unmap:
627 	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
628 		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
629 		if (likely(ret == 0)) {
630 			atomic_dec(&vps->bo->base_mapped_count);
631 			ttm_bo_kunmap(&vps->bo->map);
632 			ttm_bo_unreserve(&vps->bo->base);
633 		}
634 	}
635 
636 	return ret;
637 }
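
/*
 * Note the cursor_mob_idx ^ 1 toggle above: the two entries of
 * cursor_mob[] act as a double buffer, so each new cursor image is
 * written to the MOB the device is not currently using, and prepare_fb
 * alternates between them on every state update. An index forced to
 * -1U marks MOB setup as failed and permanently disables this path for
 * the plane.
 */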
638 
639 
640 void
641 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
642 				  struct drm_atomic_state *state)
643 {
644 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
645 									   plane);
646 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
647 									   plane);
648 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
649 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
650 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
651 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
652 	s32 hotspot_x, hotspot_y;
653 
654 	hotspot_x = du->hotspot_x;
655 	hotspot_y = du->hotspot_y;
656 
657 	if (new_state->fb) {
658 		hotspot_x += new_state->fb->hot_x;
659 		hotspot_y += new_state->fb->hot_y;
660 	}
661 
662 	du->cursor_surface = vps->surf;
663 	du->cursor_bo = vps->bo;
664 
665 	if (vps->surf) {
666 		du->cursor_age = du->cursor_surface->snooper.age;
667 
668 		vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map,
669 					vps->surf->snooper.image,
670 					new_state->crtc_w,
671 					new_state->crtc_h,
672 					hotspot_x, hotspot_y);
673 	} else if (vps->bo) {
674 		vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map,
675 				     vps->bo,
676 				     new_state->crtc_w,
677 				     new_state->crtc_h,
678 				     hotspot_x, hotspot_y);
679 	} else {
680 		vmw_cursor_update_position(dev_priv, false, 0, 0);
681 		return;
682 	}
683 
684 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
685 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
686 
687 	vmw_cursor_update_position(dev_priv, true,
688 				   du->cursor_x + hotspot_x,
689 				   du->cursor_y + hotspot_y);
690 
691 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
692 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
693 }
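
/*
 * The hotspot arithmetic above splits the effective hotspot into the
 * legacy per-display-unit part (du->hotspot_x/y) and the part supplied
 * by the framebuffer (fb->hot_x/y); the latter is stored back as
 * core_hotspot_x/y so that vmw_kms_cursor_post_execbuf() can re-apply
 * both components when it re-emits a snooped cursor image.
 */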
694 
695 
696 /**
697  * vmw_du_primary_plane_atomic_check - check if the new state is okay
698  *
699  * @plane: display plane
700  * @state: info on the new plane state, including the FB
701  *
702  * Check if the new state is settable given the current state.  Other
703  * than what the atomic helper checks, we care about crtc fitting
704  * the FB and maintaining one active framebuffer.
705  *
706  * Returns 0 on success
707  */
708 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
709 				      struct drm_atomic_state *state)
710 {
711 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
712 									   plane);
713 	struct drm_crtc_state *crtc_state = NULL;
714 	struct drm_framebuffer *new_fb = new_state->fb;
715 	int ret;
716 
717 	if (new_state->crtc)
718 		crtc_state = drm_atomic_get_new_crtc_state(state,
719 							   new_state->crtc);
720 
721 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
722 						  DRM_PLANE_NO_SCALING,
723 						  DRM_PLANE_NO_SCALING,
724 						  false, true);
725 
726 	if (!ret && new_fb) {
727 		struct drm_crtc *crtc = new_state->crtc;
728 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
729 
730 		vmw_connector_state_to_vcs(du->connector.state);
731 	}
732 
733 
734 	return ret;
735 }
736 
737 
738 /**
739  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
740  *
741  * @plane: cursor plane
742  * @state: info on the new plane state
743  *
744  * This is a chance to fail if the new cursor state does not fit
745  * our requirements.
746  *
747  * Returns 0 on success
748  */
749 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
750 				     struct drm_atomic_state *state)
751 {
752 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
753 									   plane);
754 	int ret = 0;
755 	struct drm_crtc_state *crtc_state = NULL;
756 	struct vmw_surface *surface = NULL;
757 	struct drm_framebuffer *fb = new_state->fb;
758 
759 	if (new_state->crtc)
760 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
761 							   new_state->crtc);
762 
763 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
764 						  DRM_PLANE_NO_SCALING,
765 						  DRM_PLANE_NO_SCALING,
766 						  true, true);
767 	if (ret)
768 		return ret;
769 
770 	/* Turning off */
771 	if (!fb)
772 		return 0;
773 
774 	/* A lot of the code assumes this */
775 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
776 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
777 			  new_state->crtc_w, new_state->crtc_h);
778 		return -EINVAL;
779 	}
780 
781 	if (!vmw_framebuffer_to_vfb(fb)->bo)
782 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
783 
784 	if (surface && !surface->snooper.image) {
785 		DRM_ERROR("surface not suitable for cursor\n");
786 		return -EINVAL;
787 	}
788 
789 	return 0;
790 }
791 
792 
793 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
794 			     struct drm_atomic_state *state)
795 {
796 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
797 									 crtc);
798 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
799 	int connector_mask = drm_connector_mask(&du->connector);
800 	bool has_primary = new_state->plane_mask &
801 			   drm_plane_mask(crtc->primary);
802 
803 	/* We always want to have an active plane with an active CRTC */
804 	if (has_primary != new_state->enable)
805 		return -EINVAL;
806 
807 
808 	if (new_state->connector_mask != connector_mask &&
809 	    new_state->connector_mask != 0) {
810 		DRM_ERROR("Invalid connectors configuration\n");
811 		return -EINVAL;
812 	}
813 
814 	/*
815 	 * Our virtual device does not have a dot clock, so use the logical
816 	 * clock value as the dot clock.
817 	 */
818 	if (new_state->mode.crtc_clock == 0)
819 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
820 
821 	return 0;
822 }
823 
824 
825 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
826 			      struct drm_atomic_state *state)
827 {
828 }
829 
830 
831 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
832 			      struct drm_atomic_state *state)
833 {
834 }
835 
836 
837 /**
838  * vmw_du_crtc_duplicate_state - duplicate crtc state
839  * @crtc: DRM crtc
840  *
841  * Allocates and returns a copy of the crtc state (both common and
842  * vmw-specific) for the specified crtc.
843  *
844  * Returns: The newly allocated crtc state, or NULL on failure.
845  */
846 struct drm_crtc_state *
847 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
848 {
849 	struct drm_crtc_state *state;
850 	struct vmw_crtc_state *vcs;
851 
852 	if (WARN_ON(!crtc->state))
853 		return NULL;
854 
855 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
856 
857 	if (!vcs)
858 		return NULL;
859 
860 	state = &vcs->base;
861 
862 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
863 
864 	return state;
865 }
866 
867 
868 /**
869  * vmw_du_crtc_reset - creates a blank vmw crtc state
870  * @crtc: DRM crtc
871  *
872  * Resets the atomic state for @crtc by freeing the state pointer (which
873  * might be NULL, e.g. at driver load time) and allocating a new empty state
874  * object.
875  */
876 void vmw_du_crtc_reset(struct drm_crtc *crtc)
877 {
878 	struct vmw_crtc_state *vcs;
879 
880 
881 	if (crtc->state) {
882 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
883 
884 		kfree(vmw_crtc_state_to_vcs(crtc->state));
885 	}
886 
887 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
888 
889 	if (!vcs) {
890 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
891 		return;
892 	}
893 
894 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
895 }
896 
897 
898 /**
899  * vmw_du_crtc_destroy_state - destroy crtc state
900  * @crtc: DRM crtc
901  * @state: state object to destroy
902  *
903  * Destroys the crtc state (both common and vmw-specific) for the
904  * specified crtc.
905  */
906 void
907 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
908 			  struct drm_crtc_state *state)
909 {
910 	drm_atomic_helper_crtc_destroy_state(crtc, state);
911 }
912 
913 
914 /**
915  * vmw_du_plane_duplicate_state - duplicate plane state
916  * @plane: drm plane
917  *
918  * Allocates and returns a copy of the plane state (both common and
919  * vmw-specific) for the specified plane.
920  *
921  * Returns: The newly allocated plane state, or NULL on failure.
922  */
923 struct drm_plane_state *
924 vmw_du_plane_duplicate_state(struct drm_plane *plane)
925 {
926 	struct drm_plane_state *state;
927 	struct vmw_plane_state *vps;
928 
929 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
930 
931 	if (!vps)
932 		return NULL;
933 
934 	vps->pinned = 0;
935 	vps->cpp = 0;
936 
937 	/* Each ref counted resource needs to be acquired again */
938 	if (vps->surf)
939 		(void) vmw_surface_reference(vps->surf);
940 
941 	if (vps->bo)
942 		(void) vmw_bo_reference(vps->bo);
943 
944 	state = &vps->base;
945 
946 	__drm_atomic_helper_plane_duplicate_state(plane, state);
947 
948 	return state;
949 }
950 
951 
952 /**
953  * vmw_du_plane_reset - creates a blank vmw plane state
954  * @plane: drm plane
955  *
956  * Resets the atomic state for @plane by freeing the state pointer (which might
957  * be NULL, e.g. at driver load time) and allocating a new empty state object.
958  */
959 void vmw_du_plane_reset(struct drm_plane *plane)
960 {
961 	struct vmw_plane_state *vps;
962 
963 	if (plane->state)
964 		vmw_du_plane_destroy_state(plane, plane->state);
965 
966 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
967 
968 	if (!vps) {
969 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
970 		return;
971 	}
972 
973 	__drm_atomic_helper_plane_reset(plane, &vps->base);
974 }
975 
976 
977 /**
978  * vmw_du_plane_destroy_state - destroy plane state
979  * @plane: DRM plane
980  * @state: state object to destroy
981  *
982  * Destroys the plane state (both common and vmw-specific) for the
983  * specified plane.
984  */
985 void
986 vmw_du_plane_destroy_state(struct drm_plane *plane,
987 			   struct drm_plane_state *state)
988 {
989 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
990 
991 
992 	/* Should have been freed by cleanup_fb */
993 	if (vps->surf)
994 		vmw_surface_unreference(&vps->surf);
995 
996 	if (vps->bo)
997 		vmw_bo_unreference(&vps->bo);
998 
999 	drm_atomic_helper_plane_destroy_state(plane, state);
1000 }
1001 
1002 
1003 /**
1004  * vmw_du_connector_duplicate_state - duplicate connector state
1005  * @connector: DRM connector
1006  *
1007  * Allocates and returns a copy of the connector state (both common and
1008  * vmw-specific) for the specified connector.
1009  *
1010  * Returns: The newly allocated connector state, or NULL on failure.
1011  */
1012 struct drm_connector_state *
1013 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1014 {
1015 	struct drm_connector_state *state;
1016 	struct vmw_connector_state *vcs;
1017 
1018 	if (WARN_ON(!connector->state))
1019 		return NULL;
1020 
1021 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1022 
1023 	if (!vcs)
1024 		return NULL;
1025 
1026 	state = &vcs->base;
1027 
1028 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1029 
1030 	return state;
1031 }
1032 
1033 
1034 /**
1035  * vmw_du_connector_reset - creates a blank vmw connector state
1036  * @connector: DRM connector
1037  *
1038  * Resets the atomic state for @connector by freeing the state pointer (which
1039  * might be NULL, e.g. at driver load time) and allocating a new empty state
1040  * object.
1041  */
1042 void vmw_du_connector_reset(struct drm_connector *connector)
1043 {
1044 	struct vmw_connector_state *vcs;
1045 
1046 
1047 	if (connector->state) {
1048 		__drm_atomic_helper_connector_destroy_state(connector->state);
1049 
1050 		kfree(vmw_connector_state_to_vcs(connector->state));
1051 	}
1052 
1053 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1054 
1055 	if (!vcs) {
1056 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1057 		return;
1058 	}
1059 
1060 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1061 }
1062 
1063 
1064 /**
1065  * vmw_du_connector_destroy_state - destroy connector state
1066  * @connector: DRM connector
1067  * @state: state object to destroy
1068  *
1069  * Destroys the connector state (both common and vmw-specific) for the
1070  * specified connector.
1071  */
1072 void
1073 vmw_du_connector_destroy_state(struct drm_connector *connector,
1074 			  struct drm_connector_state *state)
1075 {
1076 	drm_atomic_helper_connector_destroy_state(connector, state);
1077 }
1078 /*
1079  * Generic framebuffer code
1080  */
1081 
1082 /*
1083  * Surface framebuffer code
1084  */
1085 
1086 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1087 {
1088 	struct vmw_framebuffer_surface *vfbs =
1089 		vmw_framebuffer_to_vfbs(framebuffer);
1090 
1091 	drm_framebuffer_cleanup(framebuffer);
1092 	vmw_surface_unreference(&vfbs->surface);
1093 
1094 	kfree(vfbs);
1095 }
1096 
1097 /**
1098  * vmw_kms_readback - Perform a readback from the screen system to
1099  * a buffer-object backed framebuffer.
1100  *
1101  * @dev_priv: Pointer to the device private structure.
1102  * @file_priv: Pointer to a struct drm_file identifying the caller.
1103  * Must be set to NULL if @user_fence_rep is NULL.
1104  * @vfb: Pointer to the buffer-object backed framebuffer.
1105  * @user_fence_rep: User-space provided structure for fence information.
1106  * Must be set to non-NULL if @file_priv is non-NULL.
1107  * @vclips: Array of clip rects.
1108  * @num_clips: Number of clip rects in @vclips.
1109  *
1110  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1111  * interrupted.
1112  */
1113 int vmw_kms_readback(struct vmw_private *dev_priv,
1114 		     struct drm_file *file_priv,
1115 		     struct vmw_framebuffer *vfb,
1116 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1117 		     struct drm_vmw_rect *vclips,
1118 		     uint32_t num_clips)
1119 {
1120 	switch (dev_priv->active_display_unit) {
1121 	case vmw_du_screen_object:
1122 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1123 					    user_fence_rep, vclips, num_clips,
1124 					    NULL);
1125 	case vmw_du_screen_target:
1126 		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
1127 					user_fence_rep, NULL, vclips, num_clips,
1128 					1, false, true, NULL);
1129 	default:
1130 		WARN_ONCE(true,
1131 			  "Readback called with invalid display system.\n");
1132 	}
1133 
1134 	return -ENOSYS;
1135 }
1136 
1137 
1138 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1139 	.destroy = vmw_framebuffer_surface_destroy,
1140 	.dirty = drm_atomic_helper_dirtyfb,
1141 };
1142 
1143 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1144 					   struct vmw_surface *surface,
1145 					   struct vmw_framebuffer **out,
1146 					   const struct drm_mode_fb_cmd2
1147 					   *mode_cmd,
1148 					   bool is_bo_proxy)
1149 
1150 {
1151 	struct drm_device *dev = &dev_priv->drm;
1152 	struct vmw_framebuffer_surface *vfbs;
1153 	enum SVGA3dSurfaceFormat format;
1154 	int ret;
1155 
1156 	/* 3D is only supported on HWv8 and newer hosts */
1157 	if (dev_priv->active_display_unit == vmw_du_legacy)
1158 		return -ENOSYS;
1159 
1160 	/*
1161 	 * Sanity checks.
1162 	 */
1163 
1164 	if (!drm_any_plane_has_format(&dev_priv->drm,
1165 				      mode_cmd->pixel_format,
1166 				      mode_cmd->modifier[0])) {
1167 		drm_dbg(&dev_priv->drm,
1168 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1169 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1170 		return -EINVAL;
1171 	}
1172 
1173 	/* Surface must be marked as a scanout. */
1174 	if (unlikely(!surface->metadata.scanout))
1175 		return -EINVAL;
1176 
1177 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1178 		     surface->metadata.num_sizes != 1 ||
1179 		     surface->metadata.base_size.width < mode_cmd->width ||
1180 		     surface->metadata.base_size.height < mode_cmd->height ||
1181 		     surface->metadata.base_size.depth != 1)) {
1182 		DRM_ERROR("Incompatible surface dimensions "
1183 			  "for requested mode.\n");
1184 		return -EINVAL;
1185 	}
1186 
1187 	switch (mode_cmd->pixel_format) {
1188 	case DRM_FORMAT_ARGB8888:
1189 		format = SVGA3D_A8R8G8B8;
1190 		break;
1191 	case DRM_FORMAT_XRGB8888:
1192 		format = SVGA3D_X8R8G8B8;
1193 		break;
1194 	case DRM_FORMAT_RGB565:
1195 		format = SVGA3D_R5G6B5;
1196 		break;
1197 	case DRM_FORMAT_XRGB1555:
1198 		format = SVGA3D_A1R5G5B5;
1199 		break;
1200 	default:
1201 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1202 			  &mode_cmd->pixel_format);
1203 		return -EINVAL;
1204 	}
1205 
1206 	/*
1207 	 * For DX, surface format validation is done when surface->scanout
1208 	 * is set.
1209 	 */
1210 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1211 		DRM_ERROR("Invalid surface format for requested mode.\n");
1212 		return -EINVAL;
1213 	}
1214 
1215 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1216 	if (!vfbs) {
1217 		ret = -ENOMEM;
1218 		goto out_err1;
1219 	}
1220 
1221 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1222 	vfbs->surface = vmw_surface_reference(surface);
1223 	vfbs->base.user_handle = mode_cmd->handles[0];
1224 	vfbs->is_bo_proxy = is_bo_proxy;
1225 
1226 	*out = &vfbs->base;
1227 
1228 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1229 				   &vmw_framebuffer_surface_funcs);
1230 	if (ret)
1231 		goto out_err2;
1232 
1233 	return 0;
1234 
1235 out_err2:
1236 	vmw_surface_unreference(&surface);
1237 	kfree(vfbs);
1238 out_err1:
1239 	return ret;
1240 }
1241 
1242 /*
1243  * Buffer-object framebuffer code
1244  */
1245 
1246 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1247 					    struct drm_file *file_priv,
1248 					    unsigned int *handle)
1249 {
1250 	struct vmw_framebuffer_bo *vfbd =
1251 			vmw_framebuffer_to_vfbd(fb);
1252 
1253 	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
1254 }
1255 
1256 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1257 {
1258 	struct vmw_framebuffer_bo *vfbd =
1259 		vmw_framebuffer_to_vfbd(framebuffer);
1260 
1261 	drm_framebuffer_cleanup(framebuffer);
1262 	vmw_bo_unreference(&vfbd->buffer);
1263 
1264 	kfree(vfbd);
1265 }
1266 
1267 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1268 	.create_handle = vmw_framebuffer_bo_create_handle,
1269 	.destroy = vmw_framebuffer_bo_destroy,
1270 	.dirty = drm_atomic_helper_dirtyfb,
1271 };
1272 
1273 /*
1274  * Pin the buffer in a location suitable for access by the
1275  * display system.
1276  */
1277 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1278 {
1279 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1280 	struct vmw_buffer_object *buf;
1281 	struct ttm_placement *placement;
1282 	int ret;
1283 
1284 	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1285 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1286 
1287 	if (!buf)
1288 		return 0;
1289 
1290 	switch (dev_priv->active_display_unit) {
1291 	case vmw_du_legacy:
1292 		vmw_overlay_pause_all(dev_priv);
1293 		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1294 		vmw_overlay_resume_all(dev_priv);
1295 		break;
1296 	case vmw_du_screen_object:
1297 	case vmw_du_screen_target:
1298 		if (vfb->bo) {
1299 			if (dev_priv->capabilities & SVGA_CAP_3D) {
1300 				/*
1301 				 * Use surface DMA to get content to
1302 				 * screen target surface.
1303 				 */
1304 				placement = &vmw_vram_gmr_placement;
1305 			} else {
1306 				/* Use CPU blit. */
1307 				placement = &vmw_sys_placement;
1308 			}
1309 		} else {
1310 			/* Use surface / image update */
1311 			placement = &vmw_mob_placement;
1312 		}
1313 
1314 		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1315 	default:
1316 		return -EINVAL;
1317 	}
1318 
1319 	return ret;
1320 }
1321 
1322 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1323 {
1324 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1325 	struct vmw_buffer_object *buf;
1326 
1327 	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1328 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1329 
1330 	if (WARN_ON(!buf))
1331 		return 0;
1332 
1333 	return vmw_bo_unpin(dev_priv, buf, false);
1334 }
1335 
1336 /**
1337  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1338  *
1339  * @dev: DRM device
1340  * @mode_cmd: parameters for the new surface
1341  * @bo_mob: MOB backing the buffer object
1342  * @srf_out: newly created surface
1343  *
1344  * When the content FB is a buffer object, we create a surface as a proxy to the
1345  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1346  * This is a more efficient approach.
1347  *
1348  * RETURNS:
1349  * 0 on success, error code otherwise
1350  */
1351 static int vmw_create_bo_proxy(struct drm_device *dev,
1352 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1353 			       struct vmw_buffer_object *bo_mob,
1354 			       struct vmw_surface **srf_out)
1355 {
1356 	struct vmw_surface_metadata metadata = {0};
1357 	uint32_t format;
1358 	struct vmw_resource *res;
1359 	unsigned int bytes_pp;
1360 	int ret;
1361 
1362 	switch (mode_cmd->pixel_format) {
1363 	case DRM_FORMAT_ARGB8888:
1364 	case DRM_FORMAT_XRGB8888:
1365 		format = SVGA3D_X8R8G8B8;
1366 		bytes_pp = 4;
1367 		break;
1368 
1369 	case DRM_FORMAT_RGB565:
1370 	case DRM_FORMAT_XRGB1555:
1371 		format = SVGA3D_R5G6B5;
1372 		bytes_pp = 2;
1373 		break;
1374 
1375 	case 8:
1376 		format = SVGA3D_P8;
1377 		bytes_pp = 1;
1378 		break;
1379 
1380 	default:
1381 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1382 			  &mode_cmd->pixel_format);
1383 		return -EINVAL;
1384 	}
1385 
1386 	metadata.format = format;
1387 	metadata.mip_levels[0] = 1;
1388 	metadata.num_sizes = 1;
1389 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1390 	metadata.base_size.height =  mode_cmd->height;
1391 	metadata.base_size.depth = 1;
1392 	metadata.scanout = true;
1393 
1394 	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1395 	if (ret) {
1396 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1397 		return ret;
1398 	}
1399 
1400 	res = &(*srf_out)->res;
1401 
1402 	/* Reserve and switch the backing mob. */
1403 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1404 	(void) vmw_resource_reserve(res, false, true);
1405 	vmw_user_bo_unref(&res->backup);
1406 	res->backup = vmw_user_bo_ref(bo_mob);
1407 	res->backup_offset = 0;
1408 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1409 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1410 
1411 	return 0;
1412 }
1413 
1414 
1415 
1416 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1417 				      struct vmw_buffer_object *bo,
1418 				      struct vmw_framebuffer **out,
1419 				      const struct drm_mode_fb_cmd2
1420 				      *mode_cmd)
1421 
1422 {
1423 	struct drm_device *dev = &dev_priv->drm;
1424 	struct vmw_framebuffer_bo *vfbd;
1425 	unsigned int requested_size;
1426 	int ret;
1427 
1428 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1429 	if (unlikely(requested_size > bo->base.base.size)) {
1430 		DRM_ERROR("Screen buffer object size is too small "
1431 			  "for requested mode.\n");
1432 		return -EINVAL;
1433 	}
1434 
1435 	if (!drm_any_plane_has_format(&dev_priv->drm,
1436 				      mode_cmd->pixel_format,
1437 				      mode_cmd->modifier[0])) {
1438 		drm_dbg(&dev_priv->drm,
1439 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1440 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1441 		return -EINVAL;
1442 	}
1443 
1444 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1445 	if (!vfbd) {
1446 		ret = -ENOMEM;
1447 		goto out_err1;
1448 	}
1449 
1450 	vfbd->base.base.obj[0] = &bo->base.base;
1451 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1452 	vfbd->base.bo = true;
1453 	vfbd->buffer = vmw_bo_reference(bo);
1454 	vfbd->base.user_handle = mode_cmd->handles[0];
1455 	*out = &vfbd->base;
1456 
1457 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1458 				   &vmw_framebuffer_bo_funcs);
1459 	if (ret)
1460 		goto out_err2;
1461 
1462 	return 0;
1463 
1464 out_err2:
1465 	vmw_bo_unreference(&bo);
1466 	kfree(vfbd);
1467 out_err1:
1468 	return ret;
1469 }
1470 
1471 
1472 /**
1473  * vmw_kms_srf_ok - check if a surface can be created
1474  *
1475  * @dev_priv: Pointer to device private struct.
1476  * @width: requested width
1477  * @height: requested height
1478  *
1479  * Surfaces must not exceed the maximum texture dimensions.
1480  */
1481 static bool
1482 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1483 {
1484 	if (width  > dev_priv->texture_max_width ||
1485 	    height > dev_priv->texture_max_height)
1486 		return false;
1487 
1488 	return true;
1489 }
1490 
1491 /**
1492  * vmw_kms_new_framebuffer - Create a new framebuffer.
1493  *
1494  * @dev_priv: Pointer to device private struct.
1495  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1496  * Either @bo or @surface must be NULL.
1497  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1498  * Either @bo or @surface must be NULL.
1499  * @only_2d: No presents will occur to this buffer object based framebuffer.
1500  * This helps the code to do some important optimizations.
1501  * @mode_cmd: Frame-buffer metadata.
1502  */
1503 struct vmw_framebuffer *
1504 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1505 			struct vmw_buffer_object *bo,
1506 			struct vmw_surface *surface,
1507 			bool only_2d,
1508 			const struct drm_mode_fb_cmd2 *mode_cmd)
1509 {
1510 	struct vmw_framebuffer *vfb = NULL;
1511 	bool is_bo_proxy = false;
1512 	int ret;
1513 
1514 	/*
1515 	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1516 	 * therefore, wrap the buffer object in a surface so we can use the
1517 	 * SurfaceCopy command.
1518 	 */
1519 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1520 	    bo && only_2d &&
1521 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1522 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1523 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1524 					  bo, &surface);
1525 		if (ret)
1526 			return ERR_PTR(ret);
1527 
1528 		is_bo_proxy = true;
1529 	}
1530 
1531 	/* Create the new framebuffer depending on what we have */
1532 	if (surface) {
1533 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1534 						      mode_cmd,
1535 						      is_bo_proxy);
1536 		/*
1537 		 * vmw_create_bo_proxy() adds a reference that is no longer
1538 		 * needed
1539 		 */
1540 		if (is_bo_proxy)
1541 			vmw_surface_unreference(&surface);
1542 	} else if (bo) {
1543 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1544 						 mode_cmd);
1545 	} else {
1546 		BUG();
1547 	}
1548 
1549 	if (ret)
1550 		return ERR_PTR(ret);
1551 
1552 	vfb->pin = vmw_framebuffer_pin;
1553 	vfb->unpin = vmw_framebuffer_unpin;
1554 
1555 	return vfb;
1556 }
1557 
1558 /*
1559  * Generic Kernel modesetting functions
1560  */
1561 
1562 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1563 						 struct drm_file *file_priv,
1564 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1565 {
1566 	struct vmw_private *dev_priv = vmw_priv(dev);
1567 	struct vmw_framebuffer *vfb = NULL;
1568 	struct vmw_surface *surface = NULL;
1569 	struct vmw_buffer_object *bo = NULL;
1570 	int ret;
1571 
1572 	/* returns either a bo or surface */
1573 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1574 				     mode_cmd->handles[0],
1575 				     &surface, &bo);
1576 	if (ret) {
1577 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1578 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1579 		goto err_out;
1580 	}
1581 
1582 
1583 	if (!bo &&
1584 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1585 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1586 			dev_priv->texture_max_width,
1587 			dev_priv->texture_max_height);
1588 		goto err_out;
1589 	}
1590 
1591 
1592 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1593 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1594 				      mode_cmd);
1595 	if (IS_ERR(vfb)) {
1596 		ret = PTR_ERR(vfb);
1597 		goto err_out;
1598 	}
1599 
1600 err_out:
1601 	/* vmw_user_lookup_handle takes one ref so does new_fb */
1602 	if (bo)
1603 		vmw_user_bo_unref(&bo);
1604 	if (surface)
1605 		vmw_surface_unreference(&surface);
1606 
1607 	if (ret) {
1608 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1609 		return ERR_PTR(ret);
1610 	}
1611 
1612 	return &vfb->base;
1613 }
1614 
1615 /**
1616  * vmw_kms_check_display_memory - Validates display memory required for a
1617  * topology
1618  * @dev: DRM device
1619  * @num_rects: number of drm_rect in rects
1620  * @rects: array of drm_rect representing the topology to validate indexed by
1621  * crtc index.
1622  *
1623  * Returns:
1624  * 0 on success otherwise negative error code
1625  */
1626 static int vmw_kms_check_display_memory(struct drm_device *dev,
1627 					uint32_t num_rects,
1628 					struct drm_rect *rects)
1629 {
1630 	struct vmw_private *dev_priv = vmw_priv(dev);
1631 	struct drm_rect bounding_box = {0};
1632 	u64 total_pixels = 0, pixel_mem, bb_mem;
1633 	int i;
1634 
1635 	for (i = 0; i < num_rects; i++) {
1636 		/*
1637 		 * For STDU only individual screen (screen target) is limited by
1638 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1639 		 */
1640 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1641 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1642 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1643 			VMW_DEBUG_KMS("Screen size not supported.\n");
1644 			return -EINVAL;
1645 		}
1646 
1647 		/* Bounding box upper left is at (0,0). */
1648 		if (rects[i].x2 > bounding_box.x2)
1649 			bounding_box.x2 = rects[i].x2;
1650 
1651 		if (rects[i].y2 > bounding_box.y2)
1652 			bounding_box.y2 = rects[i].y2;
1653 
1654 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1655 			(u64) drm_rect_height(&rects[i]);
1656 	}
1657 
1658 	/* Virtual svga device primary limits are always in 32-bpp. */
1659 	pixel_mem = total_pixels * 4;
1660 
1661 	/*
1662 	 * For HV10 and below, prim_bb_mem is the vram size. When
1663 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, vram size is
1664 	 * the limit on the primary bounding box.
1665 	 */
1666 	if (pixel_mem > dev_priv->max_primary_mem) {
1667 		VMW_DEBUG_KMS("Combined output size too large.\n");
1668 		return -EINVAL;
1669 	}
1670 
1671 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1672 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1673 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1674 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1675 
1676 		if (bb_mem > dev_priv->max_primary_mem) {
1677 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1678 			return -EINVAL;
1679 		}
1680 	}
1681 
1682 	return 0;
1683 }
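
/*
 * Worked example for the checks above (numbers are illustrative, not
 * device limits): two 1920x1080 screens side by side give
 * total_pixels = 2 * 1920 * 1080, so pixel_mem is ~15.8 MiB at 4 bytes
 * per pixel, and the 3840x1080 bounding box yields the same bb_mem.
 * Placing the second screen at (1920, 1080) instead leaves pixel_mem
 * unchanged but grows the bounding box to 3840x2160, i.e. bb_mem =
 * ~31.6 MiB, which is what the bb_mem check guards against.
 */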
1684 
1685 /**
1686  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1687  * crtc mutex
1688  * @state: The atomic state pointer containing the new atomic state
1689  * @crtc: The crtc
1690  *
1691  * This function returns the new crtc state if it's part of the state update.
1692  * Otherwise returns the current crtc state. It also makes sure that the
1693  * crtc mutex is locked.
1694  *
1695  * Returns: A valid crtc state pointer or NULL. It may also return a
1696  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1697  */
1698 static struct drm_crtc_state *
1699 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1700 {
1701 	struct drm_crtc_state *crtc_state;
1702 
1703 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1704 	if (crtc_state) {
1705 		lockdep_assert_held(&crtc->mutex.mutex.base);
1706 	} else {
1707 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1708 
1709 		if (ret != 0 && ret != -EALREADY)
1710 			return ERR_PTR(ret);
1711 
1712 		crtc_state = crtc->state;
1713 	}
1714 
1715 	return crtc_state;
1716 }
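
/*
 * Callers in the atomic check path (vmw_kms_check_implicit() and
 * vmw_kms_check_topology() below) must propagate an -EDEADLK wrapped in
 * the ERR_PTR unchanged: the DRM core then drops all modeset locks and
 * reruns the whole check sequence, which is why those helpers bail out
 * with PTR_ERR() instead of retrying locally.
 */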
1717 
1718 /**
1719  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1720  * from the same fb after the new state is committed.
1721  * @dev: The drm_device.
1722  * @state: The new state to be checked.
1723  *
1724  * Returns:
1725  *   Zero on success,
1726  *   -EINVAL on invalid state,
1727  *   -EDEADLK if modeset locking needs to be rerun.
1728  */
1729 static int vmw_kms_check_implicit(struct drm_device *dev,
1730 				  struct drm_atomic_state *state)
1731 {
1732 	struct drm_framebuffer *implicit_fb = NULL;
1733 	struct drm_crtc *crtc;
1734 	struct drm_crtc_state *crtc_state;
1735 	struct drm_plane_state *plane_state;
1736 
1737 	drm_for_each_crtc(crtc, dev) {
1738 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1739 
1740 		if (!du->is_implicit)
1741 			continue;
1742 
1743 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1744 		if (IS_ERR(crtc_state))
1745 			return PTR_ERR(crtc_state);
1746 
1747 		if (!crtc_state || !crtc_state->enable)
1748 			continue;
1749 
1750 		/*
1751 		 * Can't move primary planes across crtcs, so this is OK.
1752 		 * It also means we don't need to take the plane mutex.
1753 		 */
1754 		plane_state = du->primary.state;
1755 		if (plane_state->crtc != crtc)
1756 			continue;
1757 
1758 		if (!implicit_fb)
1759 			implicit_fb = plane_state->fb;
1760 		else if (implicit_fb != plane_state->fb)
1761 			return -EINVAL;
1762 	}
1763 
1764 	return 0;
1765 }
1766 
/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * Zero on success, otherwise a negative error code.
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and
		 * it is not changed, so we don't really need to check
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check_modeset() that
 * allows us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}

int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_cmd_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	static const char *display_unit_names[] = {
		"Invalid",
		"Legacy",
		"Screen Object",
		"Screen Target",
		"Invalid (max)"
	};

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}
	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
	drm_info(&dev_priv->drm, "%s display unit initialized\n",
		 display_unit_names[dev_priv->active_display_unit]);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * Docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(&dev_priv->drm);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->max_primary_mem : dev_priv->vram_size);
}
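
/*
 * Illustrative arithmetic (example numbers, not device limits): a
 * 1920x1080 mode at an assumed 4 bytes per pixel has
 * pitch = 1920 * 4 = 7680 bytes, so it needs 7680 * 1080 = 8294400 bytes
 * of backing store, which is checked against max_primary_mem for screen
 * targets and against vram_size otherwise.
 */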

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	drm_sysfs_hotplug_event(dev);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
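
/*
 * For example (illustrative rects): with num_rects == 2 and
 * rects = { { 0, 0, 1920, 1080 }, { 1920, 0, 3840, 1080 } }, unit 0 gets
 * gui_x/gui_y = 0/0 and unit 1 gets gui_x/gui_y = 1920/0, both with a
 * 1920x1080 preferred mode; any further unit falls back to an inactive
 * 800x600 at 0/0.
 */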

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((du->unit < num_displays && du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x720@60Hz */
	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
		   1472, 1664, 0, 720, 723, 728, 748, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1080@60Hz */
	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1440@60Hz */
	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2880x1800@60Hz */
	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 3840x2160@60Hz */
	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 3840x2400@60Hz */
	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
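
/*
 * The clock formula above pins the refresh rate at 60Hz by construction:
 * clock is in kHz, so vrefresh = clock * 1000 / (htotal * vtotal)
 * = (htotal * vtotal * 6 / 100) * 1000 / (htotal * vtotal) ~= 60.
 * E.g. for a 1024x768 mode: htotal = 1174, vtotal = 918,
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz, giving ~59.99Hz.
 */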

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width  = min(max_width,  dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, a mode is additionally limited by the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_bpp,
					mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be NULL here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first to help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 * [0 0 640 480] [640 0 800 600] [0 480 640 480].
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. So a topology beyond these limits will return
 * an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0, 800, 600};

		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      def_rect.x1, def_rect.y1,
			      def_rect.x2, def_rect.y2);
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space input for overflow, as the kernel uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check limits the topology to be within
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		       vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
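
/*
 * A minimal caller sketch for the closure above (my_clip, my_commit and
 * struct my_cmd are hypothetical; the real callbacks live in the
 * per-display-unit implementations):
 *
 *	struct vmw_kms_dirty dirty = {};
 *
 *	dirty.fifo_reserve_size = sizeof(struct my_cmd) * num_clips;
 *	dirty.clip = my_clip;            (encodes one clip into dirty->cmd)
 *	dirty.fifo_commit = my_commit;   (commits dirty->num_hits commands)
 *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL, 0, 0,
 *				   num_clips, 1, &dirty);
 */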

/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_cmd_commit(dev_priv, copy_size);

	return 0;
}

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;
	int ret = 0;

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (&con->head == &dev_priv->drm.mode_config.connector_list) {
		DRM_ERROR("Could not find initial display unit.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (&mode->head == &con->modes) {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	} else {
		*p_mode = mode;
	}

out_unlock:
	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	return ret;
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;

		return ret;
	}

	return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check whether a plane update is really
	 * needed, and to find the number of clips that actually lie within
	 * the plane src, for the fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
					    update->cpu_blit);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}
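
/*
 * A minimal caller sketch for the update closure (my_* names are
 * hypothetical; the real callback sets live in the per-display-unit
 * files). The mandatory hooks seen above are calc_fifo_size() and
 * post_clip(); post_prepare(), pre_clip() and clip() are optional:
 *
 *	struct vmw_du_update_plane update = {};
 *
 *	update.plane = plane;
 *	update.old_state = old_state;
 *	update.dev_priv = dev_priv;
 *	update.vfb = vmw_framebuffer_to_vfb(plane->state->fb);
 *	update.calc_fifo_size = my_calc_fifo_size;  (hypothetical)
 *	update.post_clip = my_post_clip;            (hypothetical)
 *	ret = vmw_du_helper_plane_update(&update);
 */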