/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4-byte entries, into the command buffer
 * where the id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Callback that performs the per-command validation.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
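
/*
 * Illustrative use of VMW_CMD_DEF (a sketch, not part of this excerpt):
 * an entry in the command verifier table could look like
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * which routes SVGA_3D_CMD_SURFACE_COPY to vmw_cmd_surface_copy_check,
 * allows it from the execbuf ioctl, and keeps it enabled regardless of
 * guest-backed object support.
 */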

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);


/**
 * vmw_resources_unreserve - Unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, together with the
 * surface it's pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: View id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * returns -EINVAL. Otherwise it returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list. */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

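/*
 * vmw_cmd_invalid - Generic handler for commands that are not allowed in
 * the current configuration: always fails with -EINVAL.
 */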
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

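/*
 * vmw_cmd_ok - Generic handler for commands that need no further
 * checking: always succeeds.
 */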
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - Add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	if (p_val)
		*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind the DX query MOB associated with
 * the context.
 *
 * @ctx_res: The context the query belongs to.
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

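/*
 * vmw_cmd_set_render_target_check - Validate an SVGA3dCmdSetRenderTarget
 * command: checks the render target type, validates the context and
 * surface ids and, when MOBs are available, tracks the render target in
 * the context's staged bindings.
 */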
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

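/*
 * vmw_cmd_surface_copy_check - Validate an SVGA3dCmdSurfaceCopy command
 * by checking both the source and the destination surface ids.
 */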
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

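/*
 * vmw_cmd_buffer_copy_check - Validate an SVGA3dCmdDXBufferCopy command
 * by checking the source and destination buffer ids.
 */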
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

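/*
 * vmw_cmd_pred_copy_check - Validate an SVGA3dCmdDXPredCopyRegion
 * command by checking the source and destination surface ids.
 */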
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

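/*
 * vmw_cmd_stretch_blt_check - Validate an SVGA3dCmdSurfaceStretchBlt
 * command by checking the source and destination surface ids.
 */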
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

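/*
 * vmw_cmd_blt_surf_screen_check - Validate an SVGA3dCmdBlitSurfaceToScreen
 * command by checking the source image surface id.
 */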
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

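/*
 * vmw_cmd_present_check - Validate an SVGA3dCmdPresent command by
 * checking the id of the surface to be presented.
 */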
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query to the query cotable.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;

	int ret;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;

	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

1610 /**
1611  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
1612  *
1613  * @dev_priv: Pointer to a device private struct.
1614  * @sw_context: The software context used for this command submission.
1615  * @header: Pointer to the command header in the command stream.
1616  */
1617 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1618 			      struct vmw_sw_context *sw_context,
1619 			      SVGA3dCmdHeader *header)
1620 {
1621 	struct vmw_dma_buffer *vmw_bo;
1622 	struct vmw_query_cmd {
1623 		SVGA3dCmdHeader header;
1624 		SVGA3dCmdWaitForQuery q;
1625 	} *cmd;
1626 	int ret;
1627 
1628 	cmd = container_of(header, struct vmw_query_cmd, header);
1629 	if (dev_priv->has_mob) {
1630 		struct {
1631 			SVGA3dCmdHeader header;
1632 			SVGA3dCmdWaitForGBQuery q;
1633 		} gb_cmd;
1634 
1635 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1636 
1637 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1638 		gb_cmd.header.size = cmd->header.size;
1639 		gb_cmd.q.cid = cmd->q.cid;
1640 		gb_cmd.q.type = cmd->q.type;
1641 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1642 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1643 
1644 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1645 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1646 	}
1647 
1648 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1649 	if (unlikely(ret != 0))
1650 		return ret;
1651 
1652 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1653 				      &cmd->q.guestResult,
1654 				      &vmw_bo);
1655 	if (unlikely(ret != 0))
1656 		return ret;
1657 
1658 	vmw_dmabuf_unreference(&vmw_bo);
1659 	return 0;
1660 }
1661 
1662 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1663 		       struct vmw_sw_context *sw_context,
1664 		       SVGA3dCmdHeader *header)
1665 {
1666 	struct vmw_dma_buffer *vmw_bo = NULL;
1667 	struct vmw_surface *srf = NULL;
1668 	struct vmw_dma_cmd {
1669 		SVGA3dCmdHeader header;
1670 		SVGA3dCmdSurfaceDMA dma;
1671 	} *cmd;
1672 	int ret;
1673 	SVGA3dCmdSurfaceDMASuffix *suffix;
1674 	uint32_t bo_size;
1675 
1676 	cmd = container_of(header, struct vmw_dma_cmd, header);
1677 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1678 					       header->size - sizeof(*suffix));
1679 
1680 	/* Make sure device and verifier stay in sync. */
1681 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1682 		DRM_ERROR("Invalid DMA suffix size.\n");
1683 		return -EINVAL;
1684 	}
1685 
1686 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1687 				      &cmd->dma.guest.ptr,
1688 				      &vmw_bo);
1689 	if (unlikely(ret != 0))
1690 		return ret;
1691 
1692 	/* Make sure DMA doesn't cross BO boundaries. */
1693 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1694 	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1695 		DRM_ERROR("Invalid DMA offset.\n");
1696 		ret = -EINVAL;
1697 		goto out_no_surface;
1698 	}
1699 	bo_size -= cmd->dma.guest.ptr.offset;
1700 	if (unlikely(suffix->maximumOffset > bo_size))
1701 		suffix->maximumOffset = bo_size;
1702 
1703 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1704 				user_surface_converter, &cmd->dma.host.sid,
1705 				NULL);
1706 	if (unlikely(ret != 0)) {
1707 		if (unlikely(ret != -ERESTARTSYS))
1708 			DRM_ERROR("could not find surface for DMA.\n");
1709 		goto out_no_surface;
1710 	}
1711 
1712 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1713 
1714 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1715 			     header);
1716 
1717 out_no_surface:
1718 	vmw_dmabuf_unreference(&vmw_bo);
1719 	return ret;
1720 }
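/*
 * A hedged sketch of the wire layout vmw_cmd_dma() relies on; the copy
 * box array in the middle is implied by the suffix arithmetic rather
 * than spelled out in the struct above:
 *
 *	SVGA3dCmdHeader header;            header->size covers all of:
 *	SVGA3dCmdSurfaceDMA dma;             the fixed body,
 *	... copy boxes ...                   a variable-length middle,
 *	SVGA3dCmdSurfaceDMASuffix suffix;    and the trailing suffix.
 *
 * Hence the suffix sits at (char *)&cmd->dma + header->size -
 * sizeof(*suffix), and clamping suffix->maximumOffset to the space left
 * in the buffer object keeps the device-side DMA inside the validated BO.
 */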
1721 
1722 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1723 			struct vmw_sw_context *sw_context,
1724 			SVGA3dCmdHeader *header)
1725 {
1726 	struct vmw_draw_cmd {
1727 		SVGA3dCmdHeader header;
1728 		SVGA3dCmdDrawPrimitives body;
1729 	} *cmd;
1730 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1731 		(unsigned long)header + sizeof(*cmd));
1732 	SVGA3dPrimitiveRange *range;
1733 	uint32_t i;
1734 	uint32_t maxnum;
1735 	int ret;
1736 
1737 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1738 	if (unlikely(ret != 0))
1739 		return ret;
1740 
1741 	cmd = container_of(header, struct vmw_draw_cmd, header);
1742 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1743 
1744 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1745 		DRM_ERROR("Illegal number of vertex declarations.\n");
1746 		return -EINVAL;
1747 	}
1748 
1749 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1750 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1751 					user_surface_converter,
1752 					&decl->array.surfaceId, NULL);
1753 		if (unlikely(ret != 0))
1754 			return ret;
1755 	}
1756 
1757 	maxnum = (header->size - sizeof(cmd->body) -
1758 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1759 	if (unlikely(cmd->body.numRanges > maxnum)) {
1760 		DRM_ERROR("Illegal number of index ranges.\n");
1761 		return -EINVAL;
1762 	}
1763 
1764 	range = (SVGA3dPrimitiveRange *) decl;
1765 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1766 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1767 					user_surface_converter,
1768 					&range->indexArray.surfaceId, NULL);
1769 		if (unlikely(ret != 0))
1770 			return ret;
1771 	}
1772 	return 0;
1773 }
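/*
 * The draw command is parsed as two back-to-back variable-length arrays
 * (a sketch following the pointer arithmetic above):
 *
 *	SVGA3dCmdHeader header;
 *	SVGA3dCmdDrawPrimitives body;
 *	SVGA3dVertexDecl decl[body.numVertexDecls];
 *	SVGA3dPrimitiveRange range[body.numRanges];
 *
 * Each count is checked against what actually fits in header->size
 * before the corresponding surface ids are validated, so an oversized
 * count cannot walk the verifier past the end of the command.
 */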
1774 
1775 
1776 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1777 			     struct vmw_sw_context *sw_context,
1778 			     SVGA3dCmdHeader *header)
1779 {
1780 	struct vmw_tex_state_cmd {
1781 		SVGA3dCmdHeader header;
1782 		SVGA3dCmdSetTextureState state;
1783 	} *cmd;
1784 
1785 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1786 	  ((unsigned long) header + header->size + sizeof(*header));
1787 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1788 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1789 	struct vmw_resource_val_node *ctx_node;
1790 	struct vmw_resource_val_node *res_node;
1791 	int ret;
1792 
1793 	cmd = container_of(header, struct vmw_tex_state_cmd,
1794 			   header);
1795 
1796 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1797 				user_context_converter, &cmd->state.cid,
1798 				&ctx_node);
1799 	if (unlikely(ret != 0))
1800 		return ret;
1801 
1802 	for (; cur_state < last_state; ++cur_state) {
1803 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1804 			continue;
1805 
1806 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1807 			DRM_ERROR("Illegal texture/sampler unit %u.\n",
1808 				  (unsigned) cur_state->stage);
1809 			return -EINVAL;
1810 		}
1811 
1812 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1813 					user_surface_converter,
1814 					&cur_state->value, &res_node);
1815 		if (unlikely(ret != 0))
1816 			return ret;
1817 
1818 		if (dev_priv->has_mob) {
1819 			struct vmw_ctx_bindinfo_tex binding;
1820 
1821 			binding.bi.ctx = ctx_node->res;
1822 			binding.bi.res = res_node ? res_node->res : NULL;
1823 			binding.bi.bt = vmw_ctx_binding_tex;
1824 			binding.texture_stage = cur_state->stage;
1825 			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1826 					0, binding.texture_stage);
1827 		}
1828 	}
1829 
1830 	return 0;
1831 }
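/*
 * Hedged note on the bounds above: header->size is the length of the
 * body, so the command ends at header + sizeof(*header) + header->size.
 * Everything between the fixed SVGA3dCmdSetTextureState body and that
 * end is treated as a packed SVGA3dTextureState array; only
 * SVGA3D_TS_BIND_TEXTURE entries carry a surface id that needs
 * validation and, on guest-backed devices, binding tracking.
 */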
1832 
1833 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1834 				      struct vmw_sw_context *sw_context,
1835 				      void *buf)
1836 {
1837 	struct vmw_dma_buffer *vmw_bo;
1838 	int ret;
1839 
1840 	struct {
1841 		uint32_t header;
1842 		SVGAFifoCmdDefineGMRFB body;
1843 	} *cmd = buf;
1844 
1845 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1846 				      &cmd->body.ptr,
1847 				      &vmw_bo);
1848 	if (unlikely(ret != 0))
1849 		return ret;
1850 
1851 	vmw_dmabuf_unreference(&vmw_bo);
1852 
1853 	return ret;
1854 }
1855 
1856 
1857 /**
1858  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1859  * switching
1860  *
1861  * @dev_priv: Pointer to a device private struct.
1862  * @sw_context: The software context being used for this batch.
1863  * @val_node: The validation node representing the resource.
1864  * @buf_id: Pointer to the user-space backup buffer handle in the command
1865  * stream.
1866  * @backup_offset: Offset of backup into MOB.
1867  *
1868  * This function prepares for registering a switch of backup buffers
1869  * in the resource metadata just prior to unreserving. It is called
1870  * both directly and through the vmw_cmd_switch_backup() wrapper below.
1871  */
1872 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1873 				     struct vmw_sw_context *sw_context,
1874 				     struct vmw_resource_val_node *val_node,
1875 				     uint32_t *buf_id,
1876 				     unsigned long backup_offset)
1877 {
1878 	struct vmw_dma_buffer *dma_buf;
1879 	int ret;
1880 
1881 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1882 	if (ret)
1883 		return ret;
1884 
1885 	val_node->switching_backup = true;
1886 	if (val_node->first_usage)
1887 		val_node->no_buffer_needed = true;
1888 
1889 	vmw_dmabuf_unreference(&val_node->new_backup);
1890 	val_node->new_backup = dma_buf;
1891 	val_node->new_backup_offset = backup_offset;
1892 
1893 	return 0;
1894 }
1895 
1896 
1897 /**
1898  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1899  *
1900  * @dev_priv: Pointer to a device private struct.
1901  * @sw_context: The software context being used for this batch.
1902  * @res_type: The resource type.
1903  * @converter: Information about user-space binding for this resource type.
1904  * @res_id: Pointer to the user-space resource handle in the command stream.
1905  * @buf_id: Pointer to the user-space backup buffer handle in the command
1906  * stream.
1907  * @backup_offset: Offset of backup into MOB.
1908  *
1909  * This function prepares for registering a switch of backup buffers
1910  * in the resource metadata just prior to unreserving. It's basically a wrapper
1911  * around vmw_cmd_res_switch_backup with a different interface.
1912  */
1913 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1914 				 struct vmw_sw_context *sw_context,
1915 				 enum vmw_res_type res_type,
1916 				 const struct vmw_user_resource_conv
1917 				 *converter,
1918 				 uint32_t *res_id,
1919 				 uint32_t *buf_id,
1920 				 unsigned long backup_offset)
1921 {
1922 	struct vmw_resource_val_node *val_node;
1923 	int ret;
1924 
1925 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1926 				converter, res_id, &val_node);
1927 	if (ret)
1928 		return ret;
1929 
1930 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1931 					 buf_id, backup_offset);
1932 }
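/*
 * Usage sketch (hedged): the guest-backed bind commands below funnel
 * into this wrapper, e.g.
 *
 *	vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
 *			      user_surface_converter,
 *			      &cmd->body.sid, &cmd->body.mobid, 0);
 *
 * which resolves the resource to its validation node and records the
 * new MOB as the pending backup; the actual switch happens when the
 * resource is unreserved after the submission succeeds.
 */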
1933 
1934 /**
1935  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1936  * command
1937  *
1938  * @dev_priv: Pointer to a device private struct.
1939  * @sw_context: The software context being used for this batch.
1940  * @header: Pointer to the command header in the command stream.
1941  */
1942 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1943 				   struct vmw_sw_context *sw_context,
1944 				   SVGA3dCmdHeader *header)
1945 {
1946 	struct vmw_bind_gb_surface_cmd {
1947 		SVGA3dCmdHeader header;
1948 		SVGA3dCmdBindGBSurface body;
1949 	} *cmd;
1950 
1951 	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1952 
1953 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1954 				     user_surface_converter,
1955 				     &cmd->body.sid, &cmd->body.mobid,
1956 				     0);
1957 }
1958 
1959 /**
1960  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1961  * command
1962  *
1963  * @dev_priv: Pointer to a device private struct.
1964  * @sw_context: The software context being used for this batch.
1965  * @header: Pointer to the command header in the command stream.
1966  */
1967 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1968 				   struct vmw_sw_context *sw_context,
1969 				   SVGA3dCmdHeader *header)
1970 {
1971 	struct vmw_gb_surface_cmd {
1972 		SVGA3dCmdHeader header;
1973 		SVGA3dCmdUpdateGBImage body;
1974 	} *cmd;
1975 
1976 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1977 
1978 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1979 				 user_surface_converter,
1980 				 &cmd->body.image.sid, NULL);
1981 }
1982 
1983 /**
1984  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1985  * command
1986  *
1987  * @dev_priv: Pointer to a device private struct.
1988  * @sw_context: The software context being used for this batch.
1989  * @header: Pointer to the command header in the command stream.
1990  */
1991 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1992 				     struct vmw_sw_context *sw_context,
1993 				     SVGA3dCmdHeader *header)
1994 {
1995 	struct vmw_gb_surface_cmd {
1996 		SVGA3dCmdHeader header;
1997 		SVGA3dCmdUpdateGBSurface body;
1998 	} *cmd;
1999 
2000 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2001 
2002 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2003 				 user_surface_converter,
2004 				 &cmd->body.sid, NULL);
2005 }
2006 
2007 /**
2008  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2009  * command
2010  *
2011  * @dev_priv: Pointer to a device private struct.
2012  * @sw_context: The software context being used for this batch.
2013  * @header: Pointer to the command header in the command stream.
2014  */
2015 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2016 				     struct vmw_sw_context *sw_context,
2017 				     SVGA3dCmdHeader *header)
2018 {
2019 	struct vmw_gb_surface_cmd {
2020 		SVGA3dCmdHeader header;
2021 		SVGA3dCmdReadbackGBImage body;
2022 	} *cmd;
2023 
2024 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2025 
2026 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2027 				 user_surface_converter,
2028 				 &cmd->body.image.sid, NULL);
2029 }
2030 
2031 /**
2032  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2033  * command
2034  *
2035  * @dev_priv: Pointer to a device private struct.
2036  * @sw_context: The software context being used for this batch.
2037  * @header: Pointer to the command header in the command stream.
2038  */
2039 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2040 				       struct vmw_sw_context *sw_context,
2041 				       SVGA3dCmdHeader *header)
2042 {
2043 	struct vmw_gb_surface_cmd {
2044 		SVGA3dCmdHeader header;
2045 		SVGA3dCmdReadbackGBSurface body;
2046 	} *cmd;
2047 
2048 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2049 
2050 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2051 				 user_surface_converter,
2052 				 &cmd->body.sid, NULL);
2053 }
2054 
2055 /**
2056  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2057  * command
2058  *
2059  * @dev_priv: Pointer to a device private struct.
2060  * @sw_context: The software context being used for this batch.
2061  * @header: Pointer to the command header in the command stream.
2062  */
2063 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2064 				       struct vmw_sw_context *sw_context,
2065 				       SVGA3dCmdHeader *header)
2066 {
2067 	struct vmw_gb_surface_cmd {
2068 		SVGA3dCmdHeader header;
2069 		SVGA3dCmdInvalidateGBImage body;
2070 	} *cmd;
2071 
2072 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2073 
2074 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2075 				 user_surface_converter,
2076 				 &cmd->body.image.sid, NULL);
2077 }
2078 
2079 /**
2080  * vmw_cmd_invalidate_gb_surface - Validate an
2081  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2082  *
2083  * @dev_priv: Pointer to a device private struct.
2084  * @sw_context: The software context being used for this batch.
2085  * @header: Pointer to the command header in the command stream.
2086  */
2087 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2088 					 struct vmw_sw_context *sw_context,
2089 					 SVGA3dCmdHeader *header)
2090 {
2091 	struct vmw_gb_surface_cmd {
2092 		SVGA3dCmdHeader header;
2093 		SVGA3dCmdInvalidateGBSurface body;
2094 	} *cmd;
2095 
2096 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2097 
2098 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2099 				 user_surface_converter,
2100 				 &cmd->body.sid, NULL);
2101 }
2102 
2103 
2104 /**
2105  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2106  * command
2107  *
2108  * @dev_priv: Pointer to a device private struct.
2109  * @sw_context: The software context being used for this batch.
2110  * @header: Pointer to the command header in the command stream.
2111  */
2112 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2113 				 struct vmw_sw_context *sw_context,
2114 				 SVGA3dCmdHeader *header)
2115 {
2116 	struct vmw_shader_define_cmd {
2117 		SVGA3dCmdHeader header;
2118 		SVGA3dCmdDefineShader body;
2119 	} *cmd;
2120 	int ret;
2121 	size_t size;
2122 	struct vmw_resource_val_node *val;
2123 
2124 	cmd = container_of(header, struct vmw_shader_define_cmd,
2125 			   header);
2126 
2127 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2128 				user_context_converter, &cmd->body.cid,
2129 				&val);
2130 	if (unlikely(ret != 0))
2131 		return ret;
2132 
2133 	if (unlikely(!dev_priv->has_mob))
2134 		return 0;
2135 
2136 	size = cmd->header.size - sizeof(cmd->body);
2137 	ret = vmw_compat_shader_add(dev_priv,
2138 				    vmw_context_res_man(val->res),
2139 				    cmd->body.shid, cmd + 1,
2140 				    cmd->body.type, size,
2141 				    &sw_context->staged_cmd_res);
2142 	if (unlikely(ret != 0))
2143 		return ret;
2144 
2145 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2146 					   NULL, &cmd->header.id -
2147 					   sw_context->buf_start);
2150 }
2151 
2152 /**
2153  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2154  * command
2155  *
2156  * @dev_priv: Pointer to a device private struct.
2157  * @sw_context: The software context being used for this batch.
2158  * @header: Pointer to the command header in the command stream.
2159  */
2160 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2161 				  struct vmw_sw_context *sw_context,
2162 				  SVGA3dCmdHeader *header)
2163 {
2164 	struct vmw_shader_destroy_cmd {
2165 		SVGA3dCmdHeader header;
2166 		SVGA3dCmdDestroyShader body;
2167 	} *cmd;
2168 	int ret;
2169 	struct vmw_resource_val_node *val;
2170 
2171 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
2172 			   header);
2173 
2174 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2175 				user_context_converter, &cmd->body.cid,
2176 				&val);
2177 	if (unlikely(ret != 0))
2178 		return ret;
2179 
2180 	if (unlikely(!dev_priv->has_mob))
2181 		return 0;
2182 
2183 	ret = vmw_shader_remove(vmw_context_res_man(val->res),
2184 				cmd->body.shid,
2185 				cmd->body.type,
2186 				&sw_context->staged_cmd_res);
2187 	if (unlikely(ret != 0))
2188 		return ret;
2189 
2190 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2191 					   NULL, &cmd->header.id -
2192 					   sw_context->buf_start);
2195 }
2196 
2197 /**
2198  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2199  * command
2200  *
2201  * @dev_priv: Pointer to a device private struct.
2202  * @sw_context: The software context being used for this batch.
2203  * @header: Pointer to the command header in the command stream.
2204  */
2205 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2206 			      struct vmw_sw_context *sw_context,
2207 			      SVGA3dCmdHeader *header)
2208 {
2209 	struct vmw_set_shader_cmd {
2210 		SVGA3dCmdHeader header;
2211 		SVGA3dCmdSetShader body;
2212 	} *cmd;
2213 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2214 	struct vmw_ctx_bindinfo_shader binding;
2215 	struct vmw_resource *res = NULL;
2216 	int ret;
2217 
2218 	cmd = container_of(header, struct vmw_set_shader_cmd,
2219 			   header);
2220 
2221 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2222 		DRM_ERROR("Illegal shader type %u.\n",
2223 			  (unsigned) cmd->body.type);
2224 		return -EINVAL;
2225 	}
2226 
2227 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228 				user_context_converter, &cmd->body.cid,
2229 				&ctx_node);
2230 	if (unlikely(ret != 0))
2231 		return ret;
2232 
2233 	if (!dev_priv->has_mob)
2234 		return 0;
2235 
2236 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2237 		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2238 					cmd->body.shid,
2239 					cmd->body.type);
2240 
2241 		if (!IS_ERR(res)) {
2242 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2243 						    &cmd->body.shid, res,
2244 						    &res_node);
2245 			vmw_resource_unreference(&res);
2246 			if (unlikely(ret != 0))
2247 				return ret;
2248 		}
2249 	}
2250 
2251 	if (!res_node) {
2252 		ret = vmw_cmd_res_check(dev_priv, sw_context,
2253 					vmw_res_shader,
2254 					user_shader_converter,
2255 					&cmd->body.shid, &res_node);
2256 		if (unlikely(ret != 0))
2257 			return ret;
2258 	}
2259 
2260 	binding.bi.ctx = ctx_node->res;
2261 	binding.bi.res = res_node ? res_node->res : NULL;
2262 	binding.bi.bt = vmw_ctx_binding_shader;
2263 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2264 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2265 			binding.shader_slot, 0);
2266 	return 0;
2267 }
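/*
 * Hedged note on the lookup order above: on guest-backed devices the
 * shader id is first resolved against the context's resource manager
 * (legacy shaders converted by vmw_cmd_shader_define() live there), and
 * only if that lookup fails does the code fall back to a real
 * user-space shader resource via vmw_cmd_res_check().
 */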
2268 
2269 /**
2270  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2271  * command
2272  *
2273  * @dev_priv: Pointer to a device private struct.
2274  * @sw_context: The software context being used for this batch.
2275  * @header: Pointer to the command header in the command stream.
2276  */
2277 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2278 				    struct vmw_sw_context *sw_context,
2279 				    SVGA3dCmdHeader *header)
2280 {
2281 	struct vmw_set_shader_const_cmd {
2282 		SVGA3dCmdHeader header;
2283 		SVGA3dCmdSetShaderConst body;
2284 	} *cmd;
2285 	int ret;
2286 
2287 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
2288 			   header);
2289 
2290 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2291 				user_context_converter, &cmd->body.cid,
2292 				NULL);
2293 	if (unlikely(ret != 0))
2294 		return ret;
2295 
2296 	if (dev_priv->has_mob)
2297 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2298 
2299 	return 0;
2300 }
2301 
2302 /**
2303  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2304  * command
2305  *
2306  * @dev_priv: Pointer to a device private struct.
2307  * @sw_context: The software context being used for this batch.
2308  * @header: Pointer to the command header in the command stream.
2309  */
2310 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2311 				  struct vmw_sw_context *sw_context,
2312 				  SVGA3dCmdHeader *header)
2313 {
2314 	struct vmw_bind_gb_shader_cmd {
2315 		SVGA3dCmdHeader header;
2316 		SVGA3dCmdBindGBShader body;
2317 	} *cmd;
2318 
2319 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2320 			   header);
2321 
2322 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2323 				     user_shader_converter,
2324 				     &cmd->body.shid, &cmd->body.mobid,
2325 				     cmd->body.offsetInBytes);
2326 }
2327 
2328 /**
2329  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2330  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2331  *
2332  * @dev_priv: Pointer to a device private struct.
2333  * @sw_context: The software context being used for this batch.
2334  * @header: Pointer to the command header in the command stream.
2335  */
2336 static int
2337 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2338 				      struct vmw_sw_context *sw_context,
2339 				      SVGA3dCmdHeader *header)
2340 {
2341 	struct {
2342 		SVGA3dCmdHeader header;
2343 		SVGA3dCmdDXSetSingleConstantBuffer body;
2344 	} *cmd;
2345 	struct vmw_resource_val_node *res_node = NULL;
2346 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2347 	struct vmw_ctx_bindinfo_cb binding;
2348 	int ret;
2349 
2350 	if (unlikely(ctx_node == NULL)) {
2351 		DRM_ERROR("DX Context not set.\n");
2352 		return -EINVAL;
2353 	}
2354 
2355 	cmd = container_of(header, typeof(*cmd), header);
2356 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2357 				user_surface_converter,
2358 				&cmd->body.sid, &res_node);
2359 	if (unlikely(ret != 0))
2360 		return ret;
2361 
2362 	binding.bi.ctx = ctx_node->res;
2363 	binding.bi.res = res_node ? res_node->res : NULL;
2364 	binding.bi.bt = vmw_ctx_binding_cb;
2365 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2366 	binding.offset = cmd->body.offsetInBytes;
2367 	binding.size = cmd->body.sizeInBytes;
2368 	binding.slot = cmd->body.slot;
2369 
2370 	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2371 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2372 		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2373 			  (unsigned) cmd->body.type,
2374 			  (unsigned) binding.slot);
2375 		return -EINVAL;
2376 	}
2377 
2378 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2379 			binding.shader_slot, binding.slot);
2380 
2381 	return 0;
2382 }
2383 
2384 /**
2385  * vmw_cmd_dx_set_shader_res - Validate an
2386  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2387  *
2388  * @dev_priv: Pointer to a device private struct.
2389  * @sw_context: The software context being used for this batch.
2390  * @header: Pointer to the command header in the command stream.
2391  */
2392 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2393 				     struct vmw_sw_context *sw_context,
2394 				     SVGA3dCmdHeader *header)
2395 {
2396 	struct {
2397 		SVGA3dCmdHeader header;
2398 		SVGA3dCmdDXSetShaderResources body;
2399 	} *cmd = container_of(header, typeof(*cmd), header);
2400 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2401 		sizeof(SVGA3dShaderResourceViewId);
2402 
2403 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2404 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2405 	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2406 		DRM_ERROR("Invalid shader binding.\n");
2407 		return -EINVAL;
2408 	}
2409 
2410 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2411 				     vmw_ctx_binding_sr,
2412 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2413 				     (void *) &cmd[1], num_sr_view,
2414 				     cmd->body.startView);
2415 }
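/*
 * Like the other DX binding commands, the number of trailing view ids
 * is never trusted from the body; it is derived from the header (a
 * restatement of the arithmetic above):
 *
 *	num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
 *		      sizeof(SVGA3dShaderResourceViewId);
 *
 * and startView + num_sr_view is range-checked in 64-bit arithmetic so
 * the sum cannot wrap before the comparison.
 */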
2416 
2417 /**
2418  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2419  * command
2420  *
2421  * @dev_priv: Pointer to a device private struct.
2422  * @sw_context: The software context being used for this batch.
2423  * @header: Pointer to the command header in the command stream.
2424  */
2425 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2426 				 struct vmw_sw_context *sw_context,
2427 				 SVGA3dCmdHeader *header)
2428 {
2429 	struct {
2430 		SVGA3dCmdHeader header;
2431 		SVGA3dCmdDXSetShader body;
2432 	} *cmd;
2433 	struct vmw_resource *res = NULL;
2434 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2435 	struct vmw_ctx_bindinfo_shader binding;
2436 	int ret = 0;
2437 
2438 	if (unlikely(ctx_node == NULL)) {
2439 		DRM_ERROR("DX Context not set.\n");
2440 		return -EINVAL;
2441 	}
2442 
2443 	cmd = container_of(header, typeof(*cmd), header);
2444 
2445 	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2446 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2447 		DRM_ERROR("Illegal shader type %u.\n",
2448 			  (unsigned) cmd->body.type);
2449 		return -EINVAL;
2450 	}
2451 
2452 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2453 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2454 		if (IS_ERR(res)) {
2455 			DRM_ERROR("Could not find shader for binding.\n");
2456 			return PTR_ERR(res);
2457 		}
2458 
2459 		ret = vmw_resource_val_add(sw_context, res, NULL);
2460 		if (ret)
2461 			goto out_unref;
2462 	}
2463 
2464 	binding.bi.ctx = ctx_node->res;
2465 	binding.bi.res = res;
2466 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2467 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2468 
2469 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2470 			binding.shader_slot, 0);
2471 out_unref:
2472 	if (res)
2473 		vmw_resource_unreference(&res);
2474 
2475 	return ret;
2476 }
2477 
2478 /**
2479  * vmw_cmd_dx_set_vertex_buffers - Validate an
2480  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2481  *
2482  * @dev_priv: Pointer to a device private struct.
2483  * @sw_context: The software context being used for this batch.
2484  * @header: Pointer to the command header in the command stream.
2485  */
2486 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2487 					 struct vmw_sw_context *sw_context,
2488 					 SVGA3dCmdHeader *header)
2489 {
2490 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2491 	struct vmw_ctx_bindinfo_vb binding;
2492 	struct vmw_resource_val_node *res_node;
2493 	struct {
2494 		SVGA3dCmdHeader header;
2495 		SVGA3dCmdDXSetVertexBuffers body;
2496 		SVGA3dVertexBuffer buf[];
2497 	} *cmd;
2498 	int i, ret, num;
2499 
2500 	if (unlikely(ctx_node == NULL)) {
2501 		DRM_ERROR("DX Context not set.\n");
2502 		return -EINVAL;
2503 	}
2504 
2505 	cmd = container_of(header, typeof(*cmd), header);
2506 	num = (cmd->header.size - sizeof(cmd->body)) /
2507 		sizeof(SVGA3dVertexBuffer);
2508 	if ((u64)num + (u64)cmd->body.startBuffer >
2509 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2510 		DRM_ERROR("Invalid number of vertex buffers.\n");
2511 		return -EINVAL;
2512 	}
2513 
2514 	for (i = 0; i < num; i++) {
2515 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2516 					user_surface_converter,
2517 					&cmd->buf[i].sid, &res_node);
2518 		if (unlikely(ret != 0))
2519 			return ret;
2520 
2521 		binding.bi.ctx = ctx_node->res;
2522 		binding.bi.bt = vmw_ctx_binding_vb;
2523 		binding.bi.res = ((res_node) ? res_node->res : NULL);
2524 		binding.offset = cmd->buf[i].offset;
2525 		binding.stride = cmd->buf[i].stride;
2526 		binding.slot = i + cmd->body.startBuffer;
2527 
2528 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2529 				0, binding.slot);
2530 	}
2531 
2532 	return 0;
2533 }
2534 
2535 /**
2536  * vmw_cmd_dx_set_index_buffer - Validate an
2537  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2538  *
2539  * @dev_priv: Pointer to a device private struct.
2540  * @sw_context: The software context being used for this batch.
2541  * @header: Pointer to the command header in the command stream.
2542  */
2543 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2544 				       struct vmw_sw_context *sw_context,
2545 				       SVGA3dCmdHeader *header)
2546 {
2547 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2548 	struct vmw_ctx_bindinfo_ib binding;
2549 	struct vmw_resource_val_node *res_node;
2550 	struct {
2551 		SVGA3dCmdHeader header;
2552 		SVGA3dCmdDXSetIndexBuffer body;
2553 	} *cmd;
2554 	int ret;
2555 
2556 	if (unlikely(ctx_node == NULL)) {
2557 		DRM_ERROR("DX Context not set.\n");
2558 		return -EINVAL;
2559 	}
2560 
2561 	cmd = container_of(header, typeof(*cmd), header);
2562 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2563 				user_surface_converter,
2564 				&cmd->body.sid, &res_node);
2565 	if (unlikely(ret != 0))
2566 		return ret;
2567 
2568 	binding.bi.ctx = ctx_node->res;
2569 	binding.bi.res = ((res_node) ? res_node->res : NULL);
2570 	binding.bi.bt = vmw_ctx_binding_ib;
2571 	binding.offset = cmd->body.offset;
2572 	binding.format = cmd->body.format;
2573 
2574 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2575 
2576 	return 0;
2577 }
2578 
2579 /**
2580  * vmw_cmd_dx_set_rendertargets - Validate an
2581  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2582  *
2583  * @dev_priv: Pointer to a device private struct.
2584  * @sw_context: The software context being used for this batch.
2585  * @header: Pointer to the command header in the command stream.
2586  */
2587 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2588 					struct vmw_sw_context *sw_context,
2589 					SVGA3dCmdHeader *header)
2590 {
2591 	struct {
2592 		SVGA3dCmdHeader header;
2593 		SVGA3dCmdDXSetRenderTargets body;
2594 	} *cmd = container_of(header, typeof(*cmd), header);
2595 	int ret;
2596 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2597 		sizeof(SVGA3dRenderTargetViewId);
2598 
2599 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2600 		DRM_ERROR("Invalid DX Rendertarget binding.\n");
2601 		return -EINVAL;
2602 	}
2603 
2604 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2605 				    vmw_ctx_binding_ds, 0,
2606 				    &cmd->body.depthStencilViewId, 1, 0);
2607 	if (ret)
2608 		return ret;
2609 
2610 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2611 				     vmw_ctx_binding_dx_rt, 0,
2612 				     (void *)&cmd[1], num_rt_view, 0);
2613 }
2614 
2615 /**
2616  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2617  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2618  *
2619  * @dev_priv: Pointer to a device private struct.
2620  * @sw_context: The software context being used for this batch.
2621  * @header: Pointer to the command header in the command stream.
2622  */
2623 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2624 					      struct vmw_sw_context *sw_context,
2625 					      SVGA3dCmdHeader *header)
2626 {
2627 	struct {
2628 		SVGA3dCmdHeader header;
2629 		SVGA3dCmdDXClearRenderTargetView body;
2630 	} *cmd = container_of(header, typeof(*cmd), header);
2631 
2632 	return vmw_view_id_val_add(sw_context, vmw_view_rt,
2633 				   cmd->body.renderTargetViewId);
2634 }
2635 
2636 /**
2637  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2638  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2639  *
2640  * @dev_priv: Pointer to a device private struct.
2641  * @sw_context: The software context being used for this batch.
2642  * @header: Pointer to the command header in the command stream.
2643  */
2644 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2645 					      struct vmw_sw_context *sw_context,
2646 					      SVGA3dCmdHeader *header)
2647 {
2648 	struct {
2649 		SVGA3dCmdHeader header;
2650 		SVGA3dCmdDXClearDepthStencilView body;
2651 	} *cmd = container_of(header, typeof(*cmd), header);
2652 
2653 	return vmw_view_id_val_add(sw_context, vmw_view_ds,
2654 				   cmd->body.depthStencilViewId);
2655 }
2656 
2657 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2658 				  struct vmw_sw_context *sw_context,
2659 				  SVGA3dCmdHeader *header)
2660 {
2661 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2662 	struct vmw_resource_val_node *srf_node;
2663 	struct vmw_resource *res;
2664 	enum vmw_view_type view_type;
2665 	int ret;
2666 	/*
2667 	 * This is based on the fact that all affected define commands have
2668 	 * the same initial command body layout.
2669 	 */
2670 	struct {
2671 		SVGA3dCmdHeader header;
2672 		uint32 defined_id;
2673 		uint32 sid;
2674 	} *cmd;
2675 
2676 	if (unlikely(ctx_node == NULL)) {
2677 		DRM_ERROR("DX Context not set.\n");
2678 		return -EINVAL;
2679 	}
2680 
2681 	view_type = vmw_view_cmd_to_type(header->id);
2682 	if (view_type == vmw_view_max)
2683 		return -EINVAL;
2684 	cmd = container_of(header, typeof(*cmd), header);
2685 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2686 		DRM_ERROR("Invalid surface id.\n");
2687 		return -EINVAL;
2688 	}
2689 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2690 				user_surface_converter,
2691 				&cmd->sid, &srf_node);
2692 	if (unlikely(ret != 0))
2693 		return ret;
2694 
2695 	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2696 	ret = vmw_cotable_notify(res, cmd->defined_id);
2697 	vmw_resource_unreference(&res);
2698 	if (unlikely(ret != 0))
2699 		return ret;
2700 
2701 	return vmw_view_add(sw_context->man,
2702 			    ctx_node->res,
2703 			    srf_node->res,
2704 			    view_type,
2705 			    cmd->defined_id,
2706 			    header,
2707 			    header->size + sizeof(*header),
2708 			    &sw_context->staged_cmd_res);
2709 }
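/*
 * Define-time pattern (hedged summary): the backing surface is
 * validated first, vmw_cotable_notify() then ensures the context's view
 * cotable can hold cmd->defined_id, and vmw_view_add() finally creates
 * the tracking resource. The header pointer and the full command size
 * are passed along so the define command can be stored with the view
 * and replayed if the view needs to be recreated later.
 */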
2710 
2711 /**
2712  * vmw_cmd_dx_set_so_targets - Validate an
2713  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2714  *
2715  * @dev_priv: Pointer to a device private struct.
2716  * @sw_context: The software context being used for this batch.
2717  * @header: Pointer to the command header in the command stream.
2718  */
2719 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2720 				     struct vmw_sw_context *sw_context,
2721 				     SVGA3dCmdHeader *header)
2722 {
2723 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2724 	struct vmw_ctx_bindinfo_so binding;
2725 	struct vmw_resource_val_node *res_node;
2726 	struct {
2727 		SVGA3dCmdHeader header;
2728 		SVGA3dCmdDXSetSOTargets body;
2729 		SVGA3dSoTarget targets[];
2730 	} *cmd;
2731 	int i, ret, num;
2732 
2733 	if (unlikely(ctx_node == NULL)) {
2734 		DRM_ERROR("DX Context not set.\n");
2735 		return -EINVAL;
2736 	}
2737 
2738 	cmd = container_of(header, typeof(*cmd), header);
2739 	num = (cmd->header.size - sizeof(cmd->body)) /
2740 		sizeof(SVGA3dSoTarget);
2741 
2742 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2743 		DRM_ERROR("Invalid DX SO binding.\n");
2744 		return -EINVAL;
2745 	}
2746 
2747 	for (i = 0; i < num; i++) {
2748 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2749 					user_surface_converter,
2750 					&cmd->targets[i].sid, &res_node);
2751 		if (unlikely(ret != 0))
2752 			return ret;
2753 
2754 		binding.bi.ctx = ctx_node->res;
2755 		binding.bi.res = ((res_node) ? res_node->res : NULL);
2756 		binding.bi.bt = vmw_ctx_binding_so;
2757 		binding.offset = cmd->targets[i].offset;
2758 		binding.size = cmd->targets[i].sizeInBytes;
2759 		binding.slot = i;
2760 
2761 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2762 				0, binding.slot);
2763 	}
2764 
2765 	return 0;
2766 }
2767 
2768 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2769 				struct vmw_sw_context *sw_context,
2770 				SVGA3dCmdHeader *header)
2771 {
2772 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2773 	struct vmw_resource *res;
2774 	/*
2775 	 * This is based on the fact that all affected define commands have
2776 	 * the same initial command body layout.
2777 	 */
2778 	struct {
2779 		SVGA3dCmdHeader header;
2780 		uint32 defined_id;
2781 	} *cmd;
2782 	enum vmw_so_type so_type;
2783 	int ret;
2784 
2785 	if (unlikely(ctx_node == NULL)) {
2786 		DRM_ERROR("DX Context not set.\n");
2787 		return -EINVAL;
2788 	}
2789 
2790 	so_type = vmw_so_cmd_to_type(header->id);
2791 	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2792 	cmd = container_of(header, typeof(*cmd), header);
2793 	ret = vmw_cotable_notify(res, cmd->defined_id);
2794 	vmw_resource_unreference(&res);
2795 
2796 	return ret;
2797 }
2798 
2799 /**
2800  * vmw_cmd_dx_check_subresource - Validate an
2801  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2802  *
2803  * @dev_priv: Pointer to a device private struct.
2804  * @sw_context: The software context being used for this batch.
2805  * @header: Pointer to the command header in the command stream.
2806  */
2807 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2808 					struct vmw_sw_context *sw_context,
2809 					SVGA3dCmdHeader *header)
2810 {
2811 	struct {
2812 		SVGA3dCmdHeader header;
2813 		union {
2814 			SVGA3dCmdDXReadbackSubResource r_body;
2815 			SVGA3dCmdDXInvalidateSubResource i_body;
2816 			SVGA3dCmdDXUpdateSubResource u_body;
2817 			SVGA3dSurfaceId sid;
2818 		};
2819 	} *cmd;
2820 
2821 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2822 		     offsetof(typeof(*cmd), sid));
2823 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2824 		     offsetof(typeof(*cmd), sid));
2825 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2826 		     offsetof(typeof(*cmd), sid));
2827 
2828 	cmd = container_of(header, typeof(*cmd), header);
2829 
2830 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2831 				 user_surface_converter,
2832 				 &cmd->sid, NULL);
2833 }
2834 
2835 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2836 				struct vmw_sw_context *sw_context,
2837 				SVGA3dCmdHeader *header)
2838 {
2839 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2840 
2841 	if (unlikely(ctx_node == NULL)) {
2842 		DRM_ERROR("DX Context not set.\n");
2843 		return -EINVAL;
2844 	}
2845 
2846 	return 0;
2847 }
2848 
2849 /**
2850  * vmw_cmd_dx_view_remove - validate a view remove command and
2851  * schedule the view resource for removal.
2852  *
2853  * @dev_priv: Pointer to a device private struct.
2854  * @sw_context: The software context being used for this batch.
2855  * @header: Pointer to the command header in the command stream.
2856  *
2857  * Check that the view exists, and if it was not created using this
2858  * command batch, make sure it's validated (present in the device) so that
2859  * the remove command will not confuse the device.
2860  */
2861 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2862 				  struct vmw_sw_context *sw_context,
2863 				  SVGA3dCmdHeader *header)
2864 {
2865 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2866 	struct {
2867 		SVGA3dCmdHeader header;
2868 		union vmw_view_destroy body;
2869 	} *cmd = container_of(header, typeof(*cmd), header);
2870 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2871 	struct vmw_resource *view;
2872 	int ret;
2873 
2874 	if (!ctx_node) {
2875 		DRM_ERROR("DX Context not set.\n");
2876 		return -EINVAL;
2877 	}
2878 
2879 	ret = vmw_view_remove(sw_context->man,
2880 			      cmd->body.view_id, view_type,
2881 			      &sw_context->staged_cmd_res,
2882 			      &view);
2883 	if (ret || !view)
2884 		return ret;
2885 
2886 	/*
2887 	 * Add view to the validate list iff it was not created using this
2888 	 * command batch.
2889 	 */
2890 	return vmw_view_res_val_add(sw_context, view);
2891 }
2892 
2893 /**
2894  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2895  * command
2896  *
2897  * @dev_priv: Pointer to a device private struct.
2898  * @sw_context: The software context being used for this batch.
2899  * @header: Pointer to the command header in the command stream.
2900  */
2901 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2902 				    struct vmw_sw_context *sw_context,
2903 				    SVGA3dCmdHeader *header)
2904 {
2905 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2906 	struct vmw_resource *res;
2907 	struct {
2908 		SVGA3dCmdHeader header;
2909 		SVGA3dCmdDXDefineShader body;
2910 	} *cmd = container_of(header, typeof(*cmd), header);
2911 	int ret;
2912 
2913 	if (!ctx_node) {
2914 		DRM_ERROR("DX Context not set.\n");
2915 		return -EINVAL;
2916 	}
2917 
2918 	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2919 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2920 	vmw_resource_unreference(&res);
2921 	if (ret)
2922 		return ret;
2923 
2924 	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2925 				 cmd->body.shaderId, cmd->body.type,
2926 				 &sw_context->staged_cmd_res);
2927 }
2928 
2929 /**
2930  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2931  * command
2932  *
2933  * @dev_priv: Pointer to a device private struct.
2934  * @sw_context: The software context being used for this batch.
2935  * @header: Pointer to the command header in the command stream.
2936  */
2937 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2938 				     struct vmw_sw_context *sw_context,
2939 				     SVGA3dCmdHeader *header)
2940 {
2941 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2942 	struct {
2943 		SVGA3dCmdHeader header;
2944 		SVGA3dCmdDXDestroyShader body;
2945 	} *cmd = container_of(header, typeof(*cmd), header);
2946 	int ret;
2947 
2948 	if (!ctx_node) {
2949 		DRM_ERROR("DX Context not set.\n");
2950 		return -EINVAL;
2951 	}
2952 
2953 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2954 				&sw_context->staged_cmd_res);
2955 	if (ret)
2956 		DRM_ERROR("Could not find shader to remove.\n");
2957 
2958 	return ret;
2959 }
2960 
2961 /**
2962  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2963  * command
2964  *
2965  * @dev_priv: Pointer to a device private struct.
2966  * @sw_context: The software context being used for this batch.
2967  * @header: Pointer to the command header in the command stream.
2968  */
2969 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2970 				  struct vmw_sw_context *sw_context,
2971 				  SVGA3dCmdHeader *header)
2972 {
2973 	struct vmw_resource_val_node *ctx_node;
2974 	struct vmw_resource_val_node *res_node;
2975 	struct vmw_resource *res;
2976 	struct {
2977 		SVGA3dCmdHeader header;
2978 		SVGA3dCmdDXBindShader body;
2979 	} *cmd = container_of(header, typeof(*cmd), header);
2980 	int ret;
2981 
2982 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2983 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2984 					user_context_converter,
2985 					&cmd->body.cid, &ctx_node);
2986 		if (ret)
2987 			return ret;
2988 	} else {
2989 		ctx_node = sw_context->dx_ctx_node;
2990 		if (!ctx_node) {
2991 			DRM_ERROR("DX Context not set.\n");
2992 			return -EINVAL;
2993 		}
2994 	}
2995 
2996 	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2997 				cmd->body.shid, 0);
2998 	if (IS_ERR(res)) {
2999 		DRM_ERROR("Could not find shader to bind.\n");
3000 		return PTR_ERR(res);
3001 	}
3002 
3003 	ret = vmw_resource_val_add(sw_context, res, &res_node);
3004 	if (ret) {
3005 		DRM_ERROR("Error creating resource validation node.\n");
3006 		goto out_unref;
3007 	}
3008 
3009 
3010 	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3011 					&cmd->body.mobid,
3012 					cmd->body.offsetInBytes);
3013 out_unref:
3014 	vmw_resource_unreference(&res);
3015 
3016 	return ret;
3017 }
3018 
3019 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3020 				struct vmw_sw_context *sw_context,
3021 				void *buf, uint32_t *size)
3022 {
3023 	uint32_t size_remaining = *size;
3024 	uint32_t cmd_id;
3025 
3026 	cmd_id = ((uint32_t *)buf)[0];
3027 	switch (cmd_id) {
3028 	case SVGA_CMD_UPDATE:
3029 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3030 		break;
3031 	case SVGA_CMD_DEFINE_GMRFB:
3032 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3033 		break;
3034 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3035 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3036 		break;
3037 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3038 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3039 		break;
3040 	default:
3041 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3042 		return -EINVAL;
3043 	}
3044 
3045 	if (*size > size_remaining) {
3046 		DRM_ERROR("Invalid SVGA command (size mismatch):"
3047 			  " %u.\n", cmd_id);
3048 		return -EINVAL;
3049 	}
3050 
3051 	if (unlikely(!sw_context->kernel)) {
3052 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3053 		return -EPERM;
3054 	}
3055 
3056 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3057 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3058 
3059 	return 0;
3060 }
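/*
 * Note (hedged): these legacy 2D commands have fixed sizes, so *size is
 * simply patched to the known struct size and checked against the bytes
 * remaining in the submission. They are also restricted to in-kernel
 * submissions, and only SVGA_CMD_DEFINE_GMRFB needs further buffer
 * translation work.
 */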
3061 
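/*
 * Hedged reading of the VMW_CMD_DEF() columns below, inferred from how
 * the entries are used in this file: the boolean triple is
 * (user_allow, gb_disable, gb_enable) -- whether user-space may submit
 * the command at all, and whether it is accepted on non-guest-backed
 * and guest-backed devices respectively. Entries mapped to
 * &vmw_cmd_invalid are rejected outright.
 */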
3062 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3063 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3064 		    false, false, false),
3065 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3066 		    false, false, false),
3067 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3068 		    true, false, false),
3069 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3070 		    true, false, false),
3071 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3072 		    true, false, false),
3073 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3074 		    false, false, false),
3075 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3076 		    false, false, false),
3077 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3078 		    true, false, false),
3079 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3080 		    true, false, false),
3081 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3082 		    true, false, false),
3083 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3084 		    &vmw_cmd_set_render_target_check, true, false, false),
3085 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3086 		    true, false, false),
3087 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3088 		    true, false, false),
3089 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3090 		    true, false, false),
3091 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3092 		    true, false, false),
3093 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3094 		    true, false, false),
3095 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3096 		    true, false, false),
3097 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3098 		    true, false, false),
3099 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3100 		    false, false, false),
3101 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3102 		    true, false, false),
3103 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3104 		    true, false, false),
3105 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3106 		    true, false, false),
3107 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3108 		    true, false, false),
3109 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3110 		    true, false, false),
3111 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3112 		    true, false, false),
3113 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3114 		    true, false, false),
3115 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3116 		    true, false, false),
3117 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3118 		    true, false, false),
3119 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3120 		    true, false, false),
3121 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3122 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3123 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3124 		    false, false, false),
3125 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3126 		    false, false, false),
3127 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3128 		    false, false, false),
3129 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3130 		    false, false, false),
3131 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3132 		    false, false, false),
3133 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3134 		    false, false, false),
3135 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3136 		    false, false, false),
3137 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3138 		    false, false, false),
3139 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3140 		    false, false, false),
3141 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3142 		    false, false, false),
3143 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3144 		    false, false, false),
3145 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3146 		    false, false, false),
3147 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3148 		    false, false, false),
3149 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3150 		    false, false, true),
3151 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3152 		    false, false, true),
3153 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3154 		    false, false, true),
3155 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3156 		    false, false, true),
3157 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3158 		    false, false, true),
3159 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3160 		    false, false, true),
3161 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3162 		    false, false, true),
3163 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3164 		    false, false, true),
3165 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3166 		    true, false, true),
3167 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3168 		    false, false, true),
3169 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3170 		    true, false, true),
3171 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3172 		    &vmw_cmd_update_gb_surface, true, false, true),
3173 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3174 		    &vmw_cmd_readback_gb_image, true, false, true),
3175 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3176 		    &vmw_cmd_readback_gb_surface, true, false, true),
3177 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3178 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3179 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3180 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3181 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3182 		    false, false, true),
3183 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3184 		    false, false, true),
3185 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3186 		    false, false, true),
3187 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3188 		    false, false, true),
3189 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3190 		    false, false, true),
3191 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3192 		    false, false, true),
3193 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3194 		    true, false, true),
3195 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3196 		    false, false, true),
3197 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3198 		    false, false, false),
3199 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3200 		    true, false, true),
3201 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3202 		    true, false, true),
3203 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3204 		    true, false, true),
3205 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3206 		    true, false, true),
3207 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3208 		    false, false, true),
3209 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3210 		    false, false, true),
3211 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3212 		    false, false, true),
3213 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3214 		    false, false, true),
3215 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3216 		    false, false, true),
3217 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3218 		    false, false, true),
3219 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3220 		    false, false, true),
3221 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3222 		    false, false, true),
3223 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3224 		    false, false, true),
3225 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3226 		    false, false, true),
3227 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3228 		    true, false, true),
3229 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3230 		    false, false, true),
3231 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3232 		    false, false, true),
3233 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3234 		    false, false, true),
3235 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3236 		    false, false, true),
3237 
3238 	/*
3239 	 * DX commands
3240 	 */
3241 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3242 		    false, false, true),
3243 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3244 		    false, false, true),
3245 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3246 		    false, false, true),
3247 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3248 		    false, false, true),
3249 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3250 		    false, false, true),
3251 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3252 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3253 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3254 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3255 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3256 		    true, false, true),
3257 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3258 		    true, false, true),
3259 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3260 		    true, false, true),
3261 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3262 		    true, false, true),
3263 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3264 		    true, false, true),
3265 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3266 		    &vmw_cmd_dx_cid_check, true, false, true),
3267 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3268 		    true, false, true),
3269 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3270 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3271 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3272 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3273 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3274 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3275 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3276 		    true, false, true),
3277 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3278 		    &vmw_cmd_dx_cid_check, true, false, true),
3279 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3280 		    &vmw_cmd_dx_cid_check, true, false, true),
3281 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3282 		    true, false, true),
3283 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3284 		    true, false, true),
3285 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3286 		    true, false, true),
3287 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3288 		    &vmw_cmd_dx_cid_check, true, false, true),
3289 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3290 		    true, false, true),
3291 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3292 		    true, false, true),
3293 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3294 		    true, false, true),
3295 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3296 		    true, false, true),
3297 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3298 		    true, false, true),
3299 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3300 		    true, false, true),
3301 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3302 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3303 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3304 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3305 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3306 		    true, false, true),
3307 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3308 		    true, false, true),
3309 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3310 		    &vmw_cmd_dx_check_subresource, true, false, true),
3311 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3312 		    &vmw_cmd_dx_check_subresource, true, false, true),
3313 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3314 		    &vmw_cmd_dx_check_subresource, true, false, true),
3315 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3316 		    &vmw_cmd_dx_view_define, true, false, true),
3317 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3318 		    &vmw_cmd_dx_view_remove, true, false, true),
3319 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3320 		    &vmw_cmd_dx_view_define, true, false, true),
3321 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3322 		    &vmw_cmd_dx_view_remove, true, false, true),
3323 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3324 		    &vmw_cmd_dx_view_define, true, false, true),
3325 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3326 		    &vmw_cmd_dx_view_remove, true, false, true),
3327 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3328 		    &vmw_cmd_dx_so_define, true, false, true),
3329 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3330 		    &vmw_cmd_dx_cid_check, true, false, true),
3331 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3332 		    &vmw_cmd_dx_so_define, true, false, true),
3333 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3334 		    &vmw_cmd_dx_cid_check, true, false, true),
3335 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3336 		    &vmw_cmd_dx_so_define, true, false, true),
3337 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3338 		    &vmw_cmd_dx_cid_check, true, false, true),
3339 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3340 		    &vmw_cmd_dx_so_define, true, false, true),
3341 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3342 		    &vmw_cmd_dx_cid_check, true, false, true),
3343 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3344 		    &vmw_cmd_dx_so_define, true, false, true),
3345 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3346 		    &vmw_cmd_dx_cid_check, true, false, true),
3347 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3348 		    &vmw_cmd_dx_define_shader, true, false, true),
3349 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3350 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3351 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3352 		    &vmw_cmd_dx_bind_shader, true, false, true),
3353 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3354 		    &vmw_cmd_dx_so_define, true, false, true),
3355 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3356 		    &vmw_cmd_dx_cid_check, true, false, true),
3357 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3358 		    true, false, true),
3359 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3360 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3361 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3362 		    &vmw_cmd_dx_cid_check, true, false, true),
3363 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3364 		    &vmw_cmd_dx_cid_check, true, false, true),
3365 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3366 		    &vmw_cmd_buffer_copy_check, true, false, true),
3367 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3368 		    &vmw_cmd_pred_copy_check, true, false, true),
3369 };
3370 
3371 static int vmw_cmd_check(struct vmw_private *dev_priv,
3372 			 struct vmw_sw_context *sw_context,
3373 			 void *buf, uint32_t *size)
3374 {
3375 	uint32_t cmd_id;
3376 	uint32_t size_remaining = *size;
3377 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3378 	int ret;
3379 	const struct vmw_cmd_entry *entry;
3380 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3381 
3382 	cmd_id = ((uint32_t *)buf)[0];
3383 	/* Handle any non-3D commands. */
3384 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3385 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3386 
3387 
3388 	cmd_id = header->id;
3389 	*size = header->size + sizeof(SVGA3dCmdHeader);
3390 
3391 	cmd_id -= SVGA_3D_CMD_BASE;
3392 	if (unlikely(*size > size_remaining))
3393 		goto out_invalid;
3394 
3395 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3396 		goto out_invalid;
3397 
3398 	entry = &vmw_cmd_entries[cmd_id];
3399 	if (unlikely(!entry->func))
3400 		goto out_invalid;
3401 
3402 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3403 		goto out_privileged;
3404 
3405 	if (unlikely(entry->gb_disable && gb))
3406 		goto out_old;
3407 
3408 	if (unlikely(entry->gb_enable && !gb))
3409 		goto out_new;
3410 
3411 	ret = entry->func(dev_priv, sw_context, header);
3412 	if (unlikely(ret != 0))
3413 		goto out_invalid;
3414 
3415 	return 0;
3416 out_invalid:
3417 	DRM_ERROR("Invalid SVGA3D command: %d\n",
3418 		  cmd_id + SVGA_3D_CMD_BASE);
3419 	return -EINVAL;
3420 out_privileged:
3421 	DRM_ERROR("Privileged SVGA3D command: %d\n",
3422 		  cmd_id + SVGA_3D_CMD_BASE);
3423 	return -EPERM;
3424 out_old:
3425 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3426 		  cmd_id + SVGA_3D_CMD_BASE);
3427 	return -EINVAL;
3428 out_new:
3429 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3430 		  cmd_id + SVGA_3D_CMD_BASE);
3431 	return -EINVAL;
3432 }
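
/*
 * Flag-decoding sketch (illustrative): each VMW_CMD_DEF() triple in the
 * table above maps to (user_allow, gb_disable, gb_enable) as consumed by
 * vmw_cmd_check(). For example,
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
 *		    true, false, true)
 *
 * permits the command from user-space (user_allow == true), never rejects
 * it on guest-backed hardware (gb_disable == false), and rejects it when
 * the device lacks SVGA_CAP_GBOBJECTS (gb_enable == true).
 */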
3433 
3434 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3435 			     struct vmw_sw_context *sw_context,
3436 			     void *buf,
3437 			     uint32_t size)
3438 {
3439 	int32_t cur_size = size;
3440 	int ret;
3441 
3442 	sw_context->buf_start = buf;
3443 
3444 	while (cur_size > 0) {
3445 		size = cur_size;
3446 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3447 		if (unlikely(ret != 0))
3448 			return ret;
3449 		buf = (void *)((unsigned long) buf + size);
3450 		cur_size -= size;
3451 	}
3452 
3453 	if (unlikely(cur_size != 0)) {
3454 		DRM_ERROR("Command verifier out of sync.\n");
3455 		return -EINVAL;
3456 	}
3457 
3458 	return 0;
3459 }
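
/*
 * Batch-layout sketch (illustrative, not driver code): every 3D command
 * that vmw_cmd_check_all() walks is an SVGA3dCmdHeader (32-bit id plus
 * 32-bit body size in bytes) immediately followed by its body, so the
 * verifier advances by sizeof(SVGA3dCmdHeader) + header->size on each
 * iteration. A hypothetical helper summing a well-formed batch:
 */
static inline u32 example_batch_bytes(const void *buf, u32 num_commands)
{
	const u8 *cur = buf;

	while (num_commands--) {
		const SVGA3dCmdHeader *header = (const void *)cur;

		/* Header plus body, exactly as vmw_cmd_check() computes. */
		cur += sizeof(*header) + header->size;
	}
	return cur - (const u8 *)buf;
}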
3460 
3461 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3462 {
3463 	sw_context->cur_reloc = 0;
3464 }
3465 
3466 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3467 {
3468 	uint32_t i;
3469 	struct vmw_relocation *reloc;
3470 	struct ttm_validate_buffer *validate;
3471 	struct ttm_buffer_object *bo;
3472 
3473 	for (i = 0; i < sw_context->cur_reloc; ++i) {
3474 		reloc = &sw_context->relocs[i];
3475 		validate = &sw_context->val_bufs[reloc->index].base;
3476 		bo = validate->bo;
3477 		switch (bo->mem.mem_type) {
3478 		case TTM_PL_VRAM:
3479 			reloc->location->offset += bo->offset;
3480 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3481 			break;
3482 		case VMW_PL_GMR:
3483 			reloc->location->gmrId = bo->mem.start;
3484 			break;
3485 		case VMW_PL_MOB:
3486 			*reloc->mob_loc = bo->mem.start;
3487 			break;
3488 		default:
3489 			BUG();
3490 		}
3491 	}
3492 	vmw_free_relocations(sw_context);
3493 }
3494 
3495 /**
3496  * vmw_resource_list_unreference - Free up a resource list and unreference
3497  * all resources referenced by it.
3498  *
 * @sw_context: The software context.
3499  * @list: The resource list.
3500  */
3501 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3502 					  struct list_head *list)
3503 {
3504 	struct vmw_resource_val_node *val, *val_next;
3505 
3506 	/*
3507 	 * Drop references to resources held during command submission.
3508 	 */
3509 
3510 	list_for_each_entry_safe(val, val_next, list, head) {
3511 		list_del_init(&val->head);
3512 		vmw_resource_unreference(&val->res);
3513 
3514 		if (val->staged_bindings) {
3515 			if (val->staged_bindings != sw_context->staged_bindings)
3516 				vmw_binding_state_free(val->staged_bindings);
3517 			else
3518 				sw_context->staged_bindings_inuse = false;
3519 			val->staged_bindings = NULL;
3520 		}
3521 
3522 		kfree(val);
3523 	}
3524 }
3525 
3526 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3527 {
3528 	struct vmw_validate_buffer *entry, *next;
3529 	struct vmw_resource_val_node *val;
3530 
3531 	/*
3532 	 * Drop references to DMA buffers held during command submission.
3533 	 */
3534 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3535 				 base.head) {
3536 		list_del(&entry->base.head);
3537 		ttm_bo_unref(&entry->base.bo);
3538 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3539 		sw_context->cur_val_buf--;
3540 	}
3541 	BUG_ON(sw_context->cur_val_buf != 0);
3542 
3543 	list_for_each_entry(val, &sw_context->resource_list, head)
3544 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3545 }
3546 
3547 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3548 			       struct ttm_buffer_object *bo,
3549 			       bool interruptible,
3550 			       bool validate_as_mob)
3551 {
3552 	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3553 						  base);
3554 	int ret;
3555 
3556 	if (vbo->pin_count > 0)
3557 		return 0;
3558 
3559 	if (validate_as_mob)
3560 		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3561 				       false);
3562 
3563 	/*
3564 	 * Put BO in VRAM if there is space, otherwise as a GMR.
3565 	 * If there is no space in VRAM and GMR ids are all used up,
3566 	 * start evicting GMRs to make room. If the DMA buffer can't be
3567 	 * used as a GMR, this will return -ENOMEM.
3568 	 */
3569 
3570 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3571 			      false);
3572 	if (likely(ret == 0 || ret == -ERESTARTSYS))
3573 		return ret;
3574 
3575 	/*
3576 	 * If that failed, try VRAM again, this time evicting
3577 	 * previous contents.
3578 	 */
3579 
3580 	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3581 	return ret;
3582 }
3583 
3584 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3585 				struct vmw_sw_context *sw_context)
3586 {
3587 	struct vmw_validate_buffer *entry;
3588 	int ret;
3589 
3590 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3591 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3592 						 true,
3593 						 entry->validate_as_mob);
3594 		if (unlikely(ret != 0))
3595 			return ret;
3596 	}
3597 	return 0;
3598 }
3599 
3600 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3601 				 uint32_t size)
3602 {
3603 	if (likely(sw_context->cmd_bounce_size >= size))
3604 		return 0;
3605 
3606 	if (sw_context->cmd_bounce_size == 0)
3607 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3608 
3609 	while (sw_context->cmd_bounce_size < size) {
3610 		sw_context->cmd_bounce_size =
3611 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3612 				   (sw_context->cmd_bounce_size >> 1));
3613 	}
3614 
3615 	if (sw_context->cmd_bounce != NULL)
3616 		vfree(sw_context->cmd_bounce);
3617 
3618 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3619 
3620 	if (sw_context->cmd_bounce == NULL) {
3621 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
3622 		sw_context->cmd_bounce_size = 0;
3623 		return -ENOMEM;
3624 	}
3625 
3626 	return 0;
3627 }
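
/*
 * Worked example of the growth policy above (illustrative; assumes 4 KiB
 * pages and a 32 KiB VMWGFX_CMD_BOUNCE_INIT_SIZE): each step adds half
 * the current size and page-aligns the result, so successive sizes run
 * 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB -> ... until the requested size
 * fits. The old buffer is freed rather than copied because its contents
 * are about to be overwritten by a fresh copy_from_user() anyway.
 */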
3628 
3629 /**
3630  * vmw_execbuf_fence_commands - create and submit a command stream fence
3631  *
3632  * Creates a fence object and submits a command stream marker.
3633  * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
3634  * It is then safe to fence buffers with a NULL pointer.
3635  *
3636  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3637  * user-space handle to the fence object is created.
3638  */
3639 
3640 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3641 			       struct vmw_private *dev_priv,
3642 			       struct vmw_fence_obj **p_fence,
3643 			       uint32_t *p_handle)
3644 {
3645 	uint32_t sequence;
3646 	int ret;
3647 	bool synced = false;
3648 
3649 	/* p_handle implies file_priv. */
3650 	BUG_ON(p_handle != NULL && file_priv == NULL);
3651 
3652 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3653 	if (unlikely(ret != 0)) {
3654 		DRM_ERROR("Fence submission error. Syncing.\n");
3655 		synced = true;
3656 	}
3657 
3658 	if (p_handle != NULL)
3659 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3660 					    sequence, p_fence, p_handle);
3661 	else
3662 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3663 
3664 	if (unlikely(ret != 0 && !synced)) {
3665 		(void) vmw_fallback_wait(dev_priv, false, false,
3666 					 sequence, false,
3667 					 VMW_FENCE_WAIT_TIMEOUT);
3668 		*p_fence = NULL;
3669 	}
3670 
3671 	return ret;
3672 }
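
/*
 * Usage sketch (illustrative, not driver code): per the comment above, a
 * failed fence creation leaves *p_fence set to NULL after the fifo has
 * been synced, and a NULL fence is then safe to pass on when fencing
 * buffers.
 */
static inline void example_fence_commands(struct vmw_private *dev_priv)
{
	struct vmw_fence_obj *fence = NULL;

	/* No user-space handle requested, so file_priv may be NULL. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);

	/* ... fence buffer objects; fence may legitimately be NULL ... */

	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);
}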
3673 
3674 /**
3675  * vmw_execbuf_copy_fence_user - copy fence object information to
3676  * user-space.
3677  *
3678  * @dev_priv: Pointer to a vmw_private struct.
3679  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3680  * @ret: Return value from fence object creation.
3681  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3682  * which the information should be copied.
3683  * @fence: Pointer to the fence object.
3684  * @fence_handle: User-space fence handle.
3685  *
3686  * This function copies fence information to user-space. If copying fails,
3687  * the user-space struct drm_vmw_fence_rep::error member is left
3688  * untouched; if user-space has preloaded it with -EFAULT, the copy
3689  * failure can then be detected.
3690  * Also if copying fails, user-space will be unable to signal the fence
3691  * object so we wait for it immediately, and then unreference the
3692  * user-space reference.
3693  */
3694 void
3695 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3696 			    struct vmw_fpriv *vmw_fp,
3697 			    int ret,
3698 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3699 			    struct vmw_fence_obj *fence,
3700 			    uint32_t fence_handle)
3701 {
3702 	struct drm_vmw_fence_rep fence_rep;
3703 
3704 	if (user_fence_rep == NULL)
3705 		return;
3706 
3707 	memset(&fence_rep, 0, sizeof(fence_rep));
3708 
3709 	fence_rep.error = ret;
3710 	if (ret == 0) {
3711 		BUG_ON(fence == NULL);
3712 
3713 		fence_rep.handle = fence_handle;
3714 		fence_rep.seqno = fence->base.seqno;
3715 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3716 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3717 	}
3718 
3719 	/*
3720 	 * copy_to_user errors will be detected by user space not
3721 	 * seeing fence_rep::error filled in. Typically
3722 	 * user-space would have pre-set that member to -EFAULT.
3723 	 */
3724 	ret = copy_to_user(user_fence_rep, &fence_rep,
3725 			   sizeof(fence_rep));
3726 
3727 	/*
3728 	 * User-space lost the fence object. We need to sync
3729 	 * and unreference the handle.
3730 	 */
3731 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3732 		ttm_ref_object_base_unref(vmw_fp->tfile,
3733 					  fence_handle, TTM_REF_USAGE);
3734 		DRM_ERROR("Fence copy error. Syncing.\n");
3735 		(void) vmw_fence_obj_wait(fence, false, false,
3736 					  VMW_FENCE_WAIT_TIMEOUT);
3737 	}
3738 }
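
/*
 * User-space counterpart (illustrative): callers are expected to preload
 * the error member before submitting, e.g.
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	(pass &rep as the ioctl's fence_rep pointer)
 *	if rep.error is still -EFAULT, the copy_to_user() above failed;
 *	if rep.error == 0, rep.handle and rep.seqno describe the fence.
 *
 * This is also why a failed copy with fence_rep.error == 0 forces the
 * wait and handle unreference above: user-space never saw the handle.
 */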
3739 
3740 /**
3741  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3742  * the fifo.
3743  *
3744  * @dev_priv: Pointer to a device private structure.
3745  * @kernel_commands: Pointer to the unpatched command batch.
3746  * @command_size: Size of the unpatched command batch.
3747  * @sw_context: Structure holding the relocation lists.
3748  *
3749  * Side effects: If this function returns 0, then the command batch
3750  * pointed to by @kernel_commands will have been modified.
3751  */
3752 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3753 				   void *kernel_commands,
3754 				   u32 command_size,
3755 				   struct vmw_sw_context *sw_context)
3756 {
3757 	void *cmd;
3758 
3759 	if (sw_context->dx_ctx_node)
3760 		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3761 					  sw_context->dx_ctx_node->res->id);
3762 	else
3763 		cmd = vmw_fifo_reserve(dev_priv, command_size);
3764 	if (!cmd) {
3765 		DRM_ERROR("Failed reserving fifo space for commands.\n");
3766 		return -ENOMEM;
3767 	}
3768 
3769 	vmw_apply_relocations(sw_context);
3770 	memcpy(cmd, kernel_commands, command_size);
3771 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3772 	vmw_resource_relocations_free(&sw_context->res_relocations);
3773 	vmw_fifo_commit(dev_priv, command_size);
3774 
3775 	return 0;
3776 }
3777 
3778 /**
3779  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3780  * the command buffer manager.
3781  *
3782  * @dev_priv: Pointer to a device private structure.
3783  * @header: Opaque handle to the command buffer allocation.
3784  * @command_size: Size of the unpatched command batch.
3785  * @sw_context: Structure holding the relocation lists.
3786  *
3787  * Side effects: If this function returns 0, then the command buffer
3788  * represented by @header will have been modified.
3789  */
3790 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3791 				     struct vmw_cmdbuf_header *header,
3792 				     u32 command_size,
3793 				     struct vmw_sw_context *sw_context)
3794 {
3795 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3796 		  SVGA3D_INVALID_ID);
3797 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3798 				       id, false, header);
3799 
	/* vmw_cmdbuf_reserve() returns an ERR_PTR() on failure. */
	if (IS_ERR(cmd)) {
		DRM_ERROR("Failed reserving cmdbuf space for commands.\n");
		return PTR_ERR(cmd);
	}

3800 	vmw_apply_relocations(sw_context);
3801 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3802 	vmw_resource_relocations_free(&sw_context->res_relocations);
3803 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3804 
3805 	return 0;
3806 }
3807 
3808 /**
3809  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3810  * submission using a command buffer.
3811  *
3812  * @dev_priv: Pointer to a device private structure.
3813  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-space copy of the commands, or NULL.
3814  * @command_size: Size of the unpatched command batch.
3815  * @header: Out parameter returning the opaque pointer to the command buffer.
3816  *
3817  * This function checks whether we can use the command buffer manager for
3818  * submission and if so, creates a command buffer of suitable size and
3819  * copies the user data into that buffer.
3820  *
3821  * On successful return, the function returns a pointer to the data in the
3822  * command buffer and *@header is set to non-NULL.
3823  * If command buffers could not be used, the function will return the value
3824  * of @kernel_commands on function call. That value may be NULL. In that case,
3825  * the value of *@header will be set to NULL.
3826  * If an error is encountered, the function will return a pointer error value.
3827  * If the function is interrupted by a signal while sleeping, it will return
3828  * -ERESTARTSYS cast to a pointer error value.
3829  */
3830 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3831 				void __user *user_commands,
3832 				void *kernel_commands,
3833 				u32 command_size,
3834 				struct vmw_cmdbuf_header **header)
3835 {
3836 	size_t cmdbuf_size;
3837 	int ret;
3838 
3839 	*header = NULL;
3840 	if (command_size > SVGA_CB_MAX_SIZE) {
3841 		DRM_ERROR("Command buffer is too large.\n");
3842 		return ERR_PTR(-EINVAL);
3843 	}
3844 
3845 	if (!dev_priv->cman || kernel_commands)
3846 		return kernel_commands;
3847 
3848 	/* If possible, add a little space for fencing. */
3849 	cmdbuf_size = command_size + 512;
3850 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3851 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3852 					   true, header);
3853 	if (IS_ERR(kernel_commands))
3854 		return kernel_commands;
3855 
3856 	ret = copy_from_user(kernel_commands, user_commands,
3857 			     command_size);
3858 	if (ret) {
3859 		DRM_ERROR("Failed copying commands.\n");
3860 		vmw_cmdbuf_header_free(*header);
3861 		*header = NULL;
3862 		return ERR_PTR(-EFAULT);
3863 	}
3864 
3865 	return kernel_commands;
3866 }
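
/*
 * Caller-contract sketch (illustrative): vmw_execbuf_process() below
 * consumes the three possible outcomes roughly as
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))  -> bail out with PTR_ERR();
 *	else if (header)              -> commands live in a command buffer;
 *	else                          -> fall back to the bounce-buffer /
 *					 fifo path.
 */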
3867 
3868 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3869 				   struct vmw_sw_context *sw_context,
3870 				   uint32_t handle)
3871 {
3872 	struct vmw_resource_val_node *ctx_node;
3873 	struct vmw_resource *res;
3874 	int ret;
3875 
3876 	if (handle == SVGA3D_INVALID_ID)
3877 		return 0;
3878 
3879 	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3880 					      handle, user_context_converter,
3881 					      &res);
3882 	if (unlikely(ret != 0)) {
3883 		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
3884 			  (unsigned) handle);
3885 		return ret;
3886 	}
3887 
3888 	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3889 	if (unlikely(ret != 0))
3890 		goto out_err;
3891 
3892 	sw_context->dx_ctx_node = ctx_node;
3893 	sw_context->man = vmw_context_res_man(res);
3894 out_err:
3895 	vmw_resource_unreference(&res);
3896 	return ret;
3897 }
3898 
3899 int vmw_execbuf_process(struct drm_file *file_priv,
3900 			struct vmw_private *dev_priv,
3901 			void __user *user_commands,
3902 			void *kernel_commands,
3903 			uint32_t command_size,
3904 			uint64_t throttle_us,
3905 			uint32_t dx_context_handle,
3906 			struct drm_vmw_fence_rep __user *user_fence_rep,
3907 			struct vmw_fence_obj **out_fence)
3908 {
3909 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
3910 	struct vmw_fence_obj *fence = NULL;
3911 	struct vmw_resource *error_resource;
3912 	struct list_head resource_list;
3913 	struct vmw_cmdbuf_header *header;
3914 	struct ww_acquire_ctx ticket;
3915 	uint32_t handle;
3916 	int ret;
3917 
3918 	if (throttle_us) {
3919 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3920 				   throttle_us);
3921 
3922 		if (ret)
3923 			return ret;
3924 	}
3925 
3926 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3927 					     kernel_commands, command_size,
3928 					     &header);
3929 	if (IS_ERR(kernel_commands))
3930 		return PTR_ERR(kernel_commands);
3931 
3932 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3933 	if (ret) {
3934 		ret = -ERESTARTSYS;
3935 		goto out_free_header;
3936 	}
3937 
3938 	sw_context->kernel = false;
3939 	if (kernel_commands == NULL) {
3940 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
3941 		if (unlikely(ret != 0))
3942 			goto out_unlock;
3943 
3944 
3945 		ret = copy_from_user(sw_context->cmd_bounce,
3946 				     user_commands, command_size);
3947 
3948 		if (unlikely(ret != 0)) {
3949 			ret = -EFAULT;
3950 			DRM_ERROR("Failed copying commands.\n");
3951 			goto out_unlock;
3952 		}
3953 		kernel_commands = sw_context->cmd_bounce;
3954 	} else if (!header)
3955 		sw_context->kernel = true;
3956 
3957 	sw_context->fp = vmw_fpriv(file_priv);
3958 	sw_context->cur_reloc = 0;
3959 	sw_context->cur_val_buf = 0;
3960 	INIT_LIST_HEAD(&sw_context->resource_list);
3961 	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
3962 	sw_context->cur_query_bo = dev_priv->pinned_bo;
3963 	sw_context->last_query_ctx = NULL;
3964 	sw_context->needs_post_query_barrier = false;
3965 	sw_context->dx_ctx_node = NULL;
3966 	sw_context->dx_query_mob = NULL;
3967 	sw_context->dx_query_ctx = NULL;
3968 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3969 	INIT_LIST_HEAD(&sw_context->validate_nodes);
3970 	INIT_LIST_HEAD(&sw_context->res_relocations);
3971 	if (sw_context->staged_bindings)
3972 		vmw_binding_state_reset(sw_context->staged_bindings);
3973 
3974 	if (!sw_context->res_ht_initialized) {
3975 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3976 		if (unlikely(ret != 0))
3977 			goto out_unlock;
3978 		sw_context->res_ht_initialized = true;
3979 	}
3980 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3981 	INIT_LIST_HEAD(&resource_list);
3982 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3983 	if (unlikely(ret != 0)) {
3984 		list_splice_init(&sw_context->ctx_resource_list,
3985 				 &sw_context->resource_list);
3986 		goto out_err_nores;
3987 	}
3988 
3989 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3990 				command_size);
3991 	/*
3992 	 * Merge the resource lists before checking the return status
3993 	 * from vmw_cmd_check_all so that all the open hash tables will
3994 	 * be handled properly even if vmw_cmd_check_all fails.
3995 	 */
3996 	list_splice_init(&sw_context->ctx_resource_list,
3997 			 &sw_context->resource_list);
3998 
3999 	if (unlikely(ret != 0))
4000 		goto out_err_nores;
4001 
4002 	ret = vmw_resources_reserve(sw_context);
4003 	if (unlikely(ret != 0))
4004 		goto out_err_nores;
4005 
4006 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4007 				     true, NULL);
4008 	if (unlikely(ret != 0))
4009 		goto out_err_nores;
4010 
4011 	ret = vmw_validate_buffers(dev_priv, sw_context);
4012 	if (unlikely(ret != 0))
4013 		goto out_err;
4014 
4015 	ret = vmw_resources_validate(sw_context);
4016 	if (unlikely(ret != 0))
4017 		goto out_err;
4018 
4019 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4020 	if (unlikely(ret != 0)) {
4021 		ret = -ERESTARTSYS;
4022 		goto out_err;
4023 	}
4024 
4025 	if (dev_priv->has_mob) {
4026 		ret = vmw_rebind_contexts(sw_context);
4027 		if (unlikely(ret != 0))
4028 			goto out_unlock_binding;
4029 	}
4030 
4031 	if (!header) {
4032 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4033 					      command_size, sw_context);
4034 	} else {
4035 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4036 						sw_context);
4037 		header = NULL;
4038 	}
4039 	mutex_unlock(&dev_priv->binding_mutex);
4040 	if (ret)
4041 		goto out_err;
4042 
4043 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4044 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4045 					 &fence,
4046 					 (user_fence_rep) ? &handle : NULL);
4047 	/*
4048 	 * This error is harmless, because if fence submission fails,
4049 	 * vmw_fifo_send_fence will sync. The error will be propagated to
4050 	 * user-space in @fence_rep
4051 	 */
4052 
4053 	if (ret != 0)
4054 		DRM_ERROR("Fence submission error. Syncing.\n");
4055 
4056 	vmw_resources_unreserve(sw_context, false);
4057 
4058 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4059 				    (void *) fence);
4060 
4061 	if (unlikely(dev_priv->pinned_bo != NULL &&
4062 		     !dev_priv->query_cid_valid))
4063 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4064 
4065 	vmw_clear_validations(sw_context);
4066 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4067 				    user_fence_rep, fence, handle);
4068 
4069 	/* Don't unreference when handing fence out */
4070 	if (unlikely(out_fence != NULL)) {
4071 		*out_fence = fence;
4072 		fence = NULL;
4073 	} else if (likely(fence != NULL)) {
4074 		vmw_fence_obj_unreference(&fence);
4075 	}
4076 
4077 	list_splice_init(&sw_context->resource_list, &resource_list);
4078 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4079 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4080 
4081 	/*
4082 	 * Unreference resources outside of the cmdbuf_mutex to
4083 	 * avoid deadlocks in resource destruction paths.
4084 	 */
4085 	vmw_resource_list_unreference(sw_context, &resource_list);
4086 
4087 	return 0;
4088 
4089 out_unlock_binding:
4090 	mutex_unlock(&dev_priv->binding_mutex);
4091 out_err:
4092 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4093 out_err_nores:
4094 	vmw_resources_unreserve(sw_context, true);
4095 	vmw_resource_relocations_free(&sw_context->res_relocations);
4096 	vmw_free_relocations(sw_context);
4097 	vmw_clear_validations(sw_context);
4098 	if (unlikely(dev_priv->pinned_bo != NULL &&
4099 		     !dev_priv->query_cid_valid))
4100 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4101 out_unlock:
4102 	list_splice_init(&sw_context->resource_list, &resource_list);
4103 	error_resource = sw_context->error_resource;
4104 	sw_context->error_resource = NULL;
4105 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4106 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4107 
4108 	/*
4109 	 * Unreference resources outside of the cmdbuf_mutex to
4110 	 * avoid deadlocks in resource destruction paths.
4111 	 */
4112 	vmw_resource_list_unreference(sw_context, &resource_list);
4113 	if (unlikely(error_resource != NULL))
4114 		vmw_resource_unreference(&error_resource);
4115 out_free_header:
4116 	if (header)
4117 		vmw_cmdbuf_header_free(header);
4118 
4119 	return ret;
4120 }
4121 
4122 /**
4123  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4124  *
4125  * @dev_priv: The device private structure.
4126  *
4127  * This function is called to idle the fifo and unpin the query buffer
4128  * if the normal way to do this hits an error, which should typically be
4129  * extremely rare.
4130  */
4131 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4132 {
4133 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4134 
4135 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4136 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4137 	if (dev_priv->dummy_query_bo_pinned) {
4138 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4139 		dev_priv->dummy_query_bo_pinned = false;
4140 	}
4141 }
4142 
4143 
4144 /**
4145  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4146  * query bo.
4147  *
4148  * @dev_priv: The device private structure.
4149  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4150  * _after_ a query barrier that flushes all queries touching the current
4151  * buffer pointed to by @dev_priv->pinned_bo
4152  *
4153  * This function should be used to unpin the pinned query bo, or
4154  * as a query barrier when we need to make sure that all queries have
4155  * finished before the next fifo command. (For example on hardware
4156  * context destructions where the hardware may otherwise leak unfinished
4157  * queries).
4158  *
4159  * This function does not return any failure codes, but attempts
4160  * safe unpinning in case of errors.
4161  *
4162  * The function will synchronize on the previous query barrier, and will
4163  * thus not finish until that barrier has executed.
4164  *
4165  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4166  * before calling this function.
4167  */
4168 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4169 				     struct vmw_fence_obj *fence)
4170 {
4171 	int ret = 0;
4172 	struct list_head validate_list;
4173 	struct ttm_validate_buffer pinned_val, query_val;
4174 	struct vmw_fence_obj *lfence = NULL;
4175 	struct ww_acquire_ctx ticket;
4176 
4177 	if (dev_priv->pinned_bo == NULL)
4178 		goto out_unlock;
4179 
4180 	INIT_LIST_HEAD(&validate_list);
4181 
4182 	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4183 	pinned_val.shared = false;
4184 	list_add_tail(&pinned_val.head, &validate_list);
4185 
4186 	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4187 	query_val.shared = false;
4188 	list_add_tail(&query_val.head, &validate_list);
4189 
4190 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4191 				     false, NULL);
4192 	if (unlikely(ret != 0)) {
4193 		vmw_execbuf_unpin_panic(dev_priv);
4194 		goto out_no_reserve;
4195 	}
4196 
4197 	if (dev_priv->query_cid_valid) {
4198 		BUG_ON(fence != NULL);
4199 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4200 		if (unlikely(ret != 0)) {
4201 			vmw_execbuf_unpin_panic(dev_priv);
4202 			goto out_no_emit;
4203 		}
4204 		dev_priv->query_cid_valid = false;
4205 	}
4206 
4207 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4208 	if (dev_priv->dummy_query_bo_pinned) {
4209 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4210 		dev_priv->dummy_query_bo_pinned = false;
4211 	}
4212 	if (fence == NULL) {
4213 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4214 						  NULL);
4215 		fence = lfence;
4216 	}
4217 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4218 	if (lfence != NULL)
4219 		vmw_fence_obj_unreference(&lfence);
4220 
4221 	ttm_bo_unref(&query_val.bo);
4222 	ttm_bo_unref(&pinned_val.bo);
4223 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4224 	DRM_INFO("Dummy query bo pin count: %d\n",
4225 		 dev_priv->dummy_query_bo->pin_count);
4226 
4227 out_unlock:
4228 	return;
4229 
4230 out_no_emit:
4231 	ttm_eu_backoff_reservation(&ticket, &validate_list);
4232 out_no_reserve:
4233 	ttm_bo_unref(&query_val.bo);
4234 	ttm_bo_unref(&pinned_val.bo);
4235 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4236 }
4237 
4238 /**
4239  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4240  * query bo.
4241  *
4242  * @dev_priv: The device private structure.
4243  *
4244  * This function should be used to unpin the pinned query bo, or
4245  * as a query barrier when we need to make sure that all queries have
4246  * finished before the next fifo command. (For example on hardware
4247  * context destructions where the hardware may otherwise leak unfinished
4248  * queries).
4249  *
4250  * This function does not return any failure codes, but attempts
4251  * safe unpinning in case of errors.
4252  *
4253  * The function will synchronize on the previous query barrier, and will
4254  * thus not finish until that barrier has executed.
4255  */
4256 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4257 {
4258 	mutex_lock(&dev_priv->cmdbuf_mutex);
4259 	if (dev_priv->query_cid_valid)
4260 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4261 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4262 }
4263 
4264 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4265 		      struct drm_file *file_priv, size_t size)
4266 {
4267 	struct vmw_private *dev_priv = vmw_priv(dev);
4268 	struct drm_vmw_execbuf_arg arg;
4269 	int ret;
4270 	static const size_t copy_offset[] = {
4271 		offsetof(struct drm_vmw_execbuf_arg, context_handle),
4272 		sizeof(struct drm_vmw_execbuf_arg)};
4273 
4274 	if (unlikely(size < copy_offset[0])) {
4275 		DRM_ERROR("Invalid command size, ioctl %d\n",
4276 			  DRM_VMW_EXECBUF);
4277 		return -EINVAL;
4278 	}
4279 
4280 	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4281 		return -EFAULT;
4282 
4283 	/*
4284 	 * Extend the ioctl argument while
4285 	 * maintaining backwards compatibility:
4286 	 * We take different code paths depending on the value of
4287 	 * arg.version.
4288 	 */
4289 
4290 	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4291 		     arg.version == 0)) {
4292 		DRM_ERROR("Incorrect execbuf version.\n");
4293 		return -EINVAL;
4294 	}
4295 
4296 	if (arg.version > 1 &&
4297 	    copy_from_user(&arg.context_handle,
4298 			   (void __user *) (data + copy_offset[0]),
4299 			   copy_offset[arg.version - 1] -
4300 			   copy_offset[0]) != 0)
4301 		return -EFAULT;
4302 
4303 	switch (arg.version) {
4304 	case 1:
4305 		arg.context_handle = (uint32_t) -1;
4306 		break;
4307 	case 2:
4308 		if (arg.pad64 != 0) {
4309 			DRM_ERROR("Unused IOCTL data not set to zero.\n");
4310 			return -EINVAL;
4311 		}
4312 		break;
4313 	default:
4314 		break;
4315 	}
4316 
4317 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4318 	if (unlikely(ret != 0))
4319 		return ret;
4320 
4321 	ret = vmw_execbuf_process(file_priv, dev_priv,
4322 				  (void __user *)(unsigned long)arg.commands,
4323 				  NULL, arg.command_size, arg.throttle_us,
4324 				  arg.context_handle,
4325 				  (void __user *)(unsigned long)arg.fence_rep,
4326 				  NULL);
4327 	ttm_read_unlock(&dev_priv->reservation_sem);
4328 	if (unlikely(ret != 0))
4329 		return ret;
4330 
4331 	vmw_kms_cursor_post_execbuf(dev_priv);
4332 
4333 	return 0;
4334 }
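
/*
 * User-space sketch (illustrative; only fields dereferenced above are
 * shown, and the libdrm call is an assumption about the caller):
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(uintptr_t)cmds,
 *		.command_size = size,
 *		.throttle_us = 0,
 *		.fence_rep = (__u64)(uintptr_t)&rep,
 *		.version = 2,
 *		.context_handle = ctx_handle,  (-1 for "no DX context")
 *		.pad64 = 0,                    (must be zero for version 2)
 *	};
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *
 * A version-1 caller built against the older, shorter struct simply
 * passes the smaller size; the switch above then forces context_handle
 * to -1.
 */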
4335