// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#include "vmwgfx_drv.h"

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

struct vmw_stream {
	struct vmw_buffer_object *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/*
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

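/*
 * Return the overlay private state for a drm device, or NULL if no
 * device private data is attached.
 */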
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

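/*
 * Command layouts for the escape commands sent to the device: a generic
 * escape header and the video flush escape built on top of it.
 */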
struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

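/*
 * Fill in an escape header with the VMware namespace id and the size of
 * the escape payload that follows it.
 */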
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

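/*
 * Build a video flush escape command for the given stream.
 */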
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

/*
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* the defines are register indices, so we need the highest index + 1 items */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* the size is header + number of items */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value     = true;
	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value      = arg->format;
	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
	items[SVGA_VIDEO_SIZE].value        = arg->size;
	items[SVGA_VIDEO_WIDTH].value       = arg->width;
	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
	if (have_so) {
		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_cmd_commit(dev_priv, fifo_size);

	return 0;
}

/*
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_cmd_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/*
 * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
 *
 * With the introduction of screen objects, buffers can now be backed by
 * GMRs instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
				   struct vmw_buffer_object *buf,
				   bool pin, bool inter)
{
	if (!pin)
		return vmw_bo_unpin(dev_priv, buf, inter);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_bo_pin_in_vram(dev_priv, buf, inter);

	return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/*
 * Stop or pause a stream.
 *
 * If the stream is paused, the NO_EVICT flag is removed from the buffer,
 * but the buffer itself is left in vram. This allows, for instance,
 * mode_set to evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* If no buffer is attached, the stream is already completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_bo_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/*
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if the buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG("   %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and the stream is not paused, just
		 * send the put command; there is no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_bo_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/*
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/*
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

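/*
 * Overlays are usable only when the overlay state has been set up and the
 * FIFO exposes both the video and escape capabilities.
 */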
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
	return (dev_priv->overlay_priv != NULL &&
		((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) ==
		 VMW_OVERLAY_CAP_MASK));
}

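/*
 * Ioctl entry point for controlling a video stream from user space.
 *
 * Looks up the stream and, when enabling, the backing buffer object, then
 * updates or stops the stream under the overlay lock.
 */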
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_buffer_object *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_user_bo_unref(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

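/*
 * Report the total number of overlays, or 0 if overlays are not available.
 */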
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!vmw_overlay_available(dev_priv))
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

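/*
 * Count the streams that have not been claimed yet.
 *
 * Takes the overlay lock.
 */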
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!vmw_overlay_available(dev_priv))
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

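/*
 * Claim a free stream and return its id in @out.
 *
 * Returns -ESRCH if all streams are already claimed.
 */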
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

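/*
 * Stop and release a previously claimed stream.
 */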
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

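/*
 * Allocate and initialize the overlay state with all streams unclaimed.
 */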
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

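/*
 * Tear down the overlay state, stopping any stream that still has a
 * buffer attached.
 */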
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}