/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Callback that validates the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}
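
/*
 * For reference, entries in the command table later in this file are
 * declared with VMW_CMD_DEF along the lines of (illustrative example):
 *
 *      VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *                  true, false, false),
 */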

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list. */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}

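/*
 * vmw_cmd_invalid - Verifier callback for commands that are not allowed
 * in the command stream; unconditionally rejects the command.
 */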
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

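/*
 * vmw_cmd_ok - Verifier callback for commands that need no extra checking;
 * always accepts the command.
 */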
static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         vmw_ptr_diff(sw_context->buf_start, id_loc),
                         vmw_res_rel_normal);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

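/**
 * vmw_cmd_set_render_target_check - validate a SVGA_3D_CMD_SETRENDERTARGET
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the render target type, validates the context and surface ids and,
 * on guest-backed devices, tracks the render target binding.
 */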
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

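/**
 * vmw_cmd_surface_copy_check - validate a SVGA_3D_CMD_SURFACE_COPY command
 * by checking both the source and destination surface ids.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */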
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

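/**
 * vmw_cmd_buffer_copy_check - validate a SVGA_3D_CMD_DX_BUFFER_COPY command
 * by checking both the source and destination buffer surface ids.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */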
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

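/**
 * vmw_cmd_pred_copy_check - validate a SVGA_3D_CMD_DX_PRED_COPY_REGION
 * command by checking both the source and destination surface ids.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */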
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

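/**
 * vmw_cmd_stretch_blt_check - validate a SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command by checking both the source and destination surface ids.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */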
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

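/**
 * vmw_cmd_blt_surf_screen_check - validate a
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command by checking the source
 * surface id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */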
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

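/**
 * vmw_cmd_present_check - validate a SVGA_3D_CMD_PRESENT command by
 * checking the surface id to be presented.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */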
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_dx_define_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineQuery q;
        } *cmd;

        int ret;
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *cotable_res;

        if (ctx_node == NULL) {
                DRM_ERROR("DX Context not set for query.\n");
                return -EINVAL;
        }

        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

        if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
        vmw_resource_unreference(&cotable_res);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dx_bind_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindQuery q;
        } *cmd;

        struct vmw_dma_buffer *vmw_bo;
        int ret;

        cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

        /*
         * Look up the buffer pointed to by q.mobid, put it on the relocation
         * list so its kernel mode MOB ID can be filled in later
         */
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
                                    &vmw_bo);

        if (ret != 0)
                return ret;

        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_begin_gb_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginGBQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

        if (unlikely(dev_priv->has_mob)) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdBeginGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdWaitForGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

1713 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1714 struct vmw_sw_context *sw_context,
1715 SVGA3dCmdHeader *header)
1716 {
1717 struct vmw_dma_buffer *vmw_bo = NULL;
1718 struct vmw_surface *srf = NULL;
1719 struct vmw_dma_cmd {
1720 SVGA3dCmdHeader header;
1721 SVGA3dCmdSurfaceDMA dma;
1722 } *cmd;
1723 int ret;
1724 SVGA3dCmdSurfaceDMASuffix *suffix;
1725 uint32_t bo_size;
1726
1727 cmd = container_of(header, struct vmw_dma_cmd, header);
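	/* The variable-size DMA body is terminated by a fixed-size suffix. */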
1728 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1729 header->size - sizeof(*suffix));
1730
1731 	/* Make sure device and verifier stay in sync. */
1732 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1733 DRM_ERROR("Invalid DMA suffix size.\n");
1734 return -EINVAL;
1735 }
1736
1737 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1738 &cmd->dma.guest.ptr,
1739 &vmw_bo);
1740 if (unlikely(ret != 0))
1741 return ret;
1742
1743 /* Make sure DMA doesn't cross BO boundaries. */
1744 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1745 if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1746 DRM_ERROR("Invalid DMA offset.\n");
1747 return -EINVAL;
1748 }
1749
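	/* Clamp the transfer so the device cannot DMA past the end of the BO. */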
1750 bo_size -= cmd->dma.guest.ptr.offset;
1751 if (unlikely(suffix->maximumOffset > bo_size))
1752 suffix->maximumOffset = bo_size;
1753
1754 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1755 user_surface_converter, &cmd->dma.host.sid,
1756 NULL);
1757 if (unlikely(ret != 0)) {
1758 if (unlikely(ret != -ERESTARTSYS))
1759 DRM_ERROR("could not find surface for DMA.\n");
1760 goto out_no_surface;
1761 }
1762
1763 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1764
1765 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1766 header);
1767
1768 out_no_surface:
1769 vmw_dmabuf_unreference(&vmw_bo);
1770 return ret;
1771 }
1772
1773 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1774 struct vmw_sw_context *sw_context,
1775 SVGA3dCmdHeader *header)
1776 {
1777 struct vmw_draw_cmd {
1778 SVGA3dCmdHeader header;
1779 SVGA3dCmdDrawPrimitives body;
1780 } *cmd;
1781 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1782 (unsigned long)header + sizeof(*cmd));
1783 SVGA3dPrimitiveRange *range;
1784 uint32_t i;
1785 uint32_t maxnum;
1786 int ret;
1787
1788 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1789 if (unlikely(ret != 0))
1790 return ret;
1791
1792 cmd = container_of(header, struct vmw_draw_cmd, header);
1793 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1794
1795 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1796 DRM_ERROR("Illegal number of vertex declarations.\n");
1797 return -EINVAL;
1798 }
1799
1800 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1801 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1802 user_surface_converter,
1803 &decl->array.surfaceId, NULL);
1804 if (unlikely(ret != 0))
1805 return ret;
1806 }
1807
1808 maxnum = (header->size - sizeof(cmd->body) -
1809 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1810 if (unlikely(cmd->body.numRanges > maxnum)) {
1811 DRM_ERROR("Illegal number of index ranges.\n");
1812 return -EINVAL;
1813 }
1814
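	/* The index ranges immediately follow the vertex declarations. */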
1815 range = (SVGA3dPrimitiveRange *) decl;
1816 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1817 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818 user_surface_converter,
1819 &range->indexArray.surfaceId, NULL);
1820 if (unlikely(ret != 0))
1821 return ret;
1822 }
1823 return 0;
1824 }
1825
1826
1827 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1828 struct vmw_sw_context *sw_context,
1829 SVGA3dCmdHeader *header)
1830 {
1831 struct vmw_tex_state_cmd {
1832 SVGA3dCmdHeader header;
1833 SVGA3dCmdSetTextureState state;
1834 } *cmd;
1835
1836 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1837 ((unsigned long) header + header->size + sizeof(header));
1838 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1839 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1840 struct vmw_resource_val_node *ctx_node;
1841 struct vmw_resource_val_node *res_node;
1842 int ret;
1843
1844 cmd = container_of(header, struct vmw_tex_state_cmd,
1845 header);
1846
1847 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1848 user_context_converter, &cmd->state.cid,
1849 &ctx_node);
1850 if (unlikely(ret != 0))
1851 return ret;
1852
1853 for (; cur_state < last_state; ++cur_state) {
1854 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1855 continue;
1856
1857 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1858 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1859 (unsigned) cur_state->stage);
1860 return -EINVAL;
1861 }
1862
1863 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1864 user_surface_converter,
1865 &cur_state->value, &res_node);
1866 if (unlikely(ret != 0))
1867 return ret;
1868
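		/*
		 * On guest-backed devices, record the texture binding in the
		 * context's staged bindings so that a stale binding can be
		 * scrubbed if the bound resource goes away.
		 */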
1869 if (dev_priv->has_mob) {
1870 struct vmw_ctx_bindinfo_tex binding;
1871
1872 binding.bi.ctx = ctx_node->res;
1873 binding.bi.res = res_node ? res_node->res : NULL;
1874 binding.bi.bt = vmw_ctx_binding_tex;
1875 binding.texture_stage = cur_state->stage;
1876 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1877 0, binding.texture_stage);
1878 }
1879 }
1880
1881 return 0;
1882 }
1883
1884 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1885 struct vmw_sw_context *sw_context,
1886 void *buf)
1887 {
1888 struct vmw_dma_buffer *vmw_bo;
1889 int ret;
1890
1891 struct {
1892 uint32_t header;
1893 SVGAFifoCmdDefineGMRFB body;
1894 } *cmd = buf;
1895
1896 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1897 &cmd->body.ptr,
1898 &vmw_bo);
1899 if (unlikely(ret != 0))
1900 return ret;
1901
1902 vmw_dmabuf_unreference(&vmw_bo);
1903
1904 return ret;
1905 }
1906
1907
1908 /**
1909 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1910 * switching
1911 *
1912 * @dev_priv: Pointer to a device private struct.
1913 * @sw_context: The software context being used for this batch.
1914 * @val_node: The validation node representing the resource.
1915 * @buf_id: Pointer to the user-space backup buffer handle in the command
1916 * stream.
1917 * @backup_offset: Offset of backup into MOB.
1918 *
1919 * This function prepares for registering a switch of backup buffers
1920  * in the resource metadata just prior to unreserving.
1922 */
1923 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1924 struct vmw_sw_context *sw_context,
1925 struct vmw_resource_val_node *val_node,
1926 uint32_t *buf_id,
1927 unsigned long backup_offset)
1928 {
1929 struct vmw_dma_buffer *dma_buf;
1930 int ret;
1931
1932 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1933 if (ret)
1934 return ret;
1935
1936 val_node->switching_backup = true;
1937 if (val_node->first_usage)
1938 val_node->no_buffer_needed = true;
1939
1940 vmw_dmabuf_unreference(&val_node->new_backup);
1941 val_node->new_backup = dma_buf;
1942 val_node->new_backup_offset = backup_offset;
1943
1944 return 0;
1945 }
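/*
 * A typical caller (see e.g. vmw_cmd_dx_bind_shader() below) looks up or
 * creates the resource's validation node and then hands the user-space MOB
 * handle and byte offset to vmw_cmd_res_switch_backup(); the actual buffer
 * swap is committed when the resource is unreserved after validation.
 */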
1946
1947
1948 /**
1949 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1950 *
1951 * @dev_priv: Pointer to a device private struct.
1952 * @sw_context: The software context being used for this batch.
1953 * @res_type: The resource type.
1954 * @converter: Information about user-space binding for this resource type.
1955 * @res_id: Pointer to the user-space resource handle in the command stream.
1956 * @buf_id: Pointer to the user-space backup buffer handle in the command
1957 * stream.
1958 * @backup_offset: Offset of backup into MOB.
1959 *
1960 * This function prepares for registering a switch of backup buffers
1961 * in the resource metadata just prior to unreserving. It's basically a wrapper
1962 * around vmw_cmd_res_switch_backup with a different interface.
1963 */
1964 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1965 struct vmw_sw_context *sw_context,
1966 enum vmw_res_type res_type,
1967 const struct vmw_user_resource_conv
1968 *converter,
1969 uint32_t *res_id,
1970 uint32_t *buf_id,
1971 unsigned long backup_offset)
1972 {
1973 struct vmw_resource_val_node *val_node;
1974 int ret;
1975
1976 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1977 converter, res_id, &val_node);
1978 if (ret)
1979 return ret;
1980
1981 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1982 buf_id, backup_offset);
1983 }
1984
1985 /**
1986 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1987 * command
1988 *
1989 * @dev_priv: Pointer to a device private struct.
1990 * @sw_context: The software context being used for this batch.
1991 * @header: Pointer to the command header in the command stream.
1992 */
1993 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1994 struct vmw_sw_context *sw_context,
1995 SVGA3dCmdHeader *header)
1996 {
1997 struct vmw_bind_gb_surface_cmd {
1998 SVGA3dCmdHeader header;
1999 SVGA3dCmdBindGBSurface body;
2000 } *cmd;
2001
2002 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2003
2004 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2005 user_surface_converter,
2006 &cmd->body.sid, &cmd->body.mobid,
2007 0);
2008 }
2009
2010 /**
2011 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2012 * command
2013 *
2014 * @dev_priv: Pointer to a device private struct.
2015 * @sw_context: The software context being used for this batch.
2016 * @header: Pointer to the command header in the command stream.
2017 */
2018 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2019 struct vmw_sw_context *sw_context,
2020 SVGA3dCmdHeader *header)
2021 {
2022 struct vmw_gb_surface_cmd {
2023 SVGA3dCmdHeader header;
2024 SVGA3dCmdUpdateGBImage body;
2025 } *cmd;
2026
2027 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2028
2029 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2030 user_surface_converter,
2031 &cmd->body.image.sid, NULL);
2032 }
2033
2034 /**
2035 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2036 * command
2037 *
2038 * @dev_priv: Pointer to a device private struct.
2039 * @sw_context: The software context being used for this batch.
2040 * @header: Pointer to the command header in the command stream.
2041 */
2042 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2043 struct vmw_sw_context *sw_context,
2044 SVGA3dCmdHeader *header)
2045 {
2046 struct vmw_gb_surface_cmd {
2047 SVGA3dCmdHeader header;
2048 SVGA3dCmdUpdateGBSurface body;
2049 } *cmd;
2050
2051 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2052
2053 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2054 user_surface_converter,
2055 &cmd->body.sid, NULL);
2056 }
2057
2058 /**
2059 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2060 * command
2061 *
2062 * @dev_priv: Pointer to a device private struct.
2063 * @sw_context: The software context being used for this batch.
2064 * @header: Pointer to the command header in the command stream.
2065 */
2066 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2067 struct vmw_sw_context *sw_context,
2068 SVGA3dCmdHeader *header)
2069 {
2070 struct vmw_gb_surface_cmd {
2071 SVGA3dCmdHeader header;
2072 SVGA3dCmdReadbackGBImage body;
2073 } *cmd;
2074
2075 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2076
2077 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2078 user_surface_converter,
2079 &cmd->body.image.sid, NULL);
2080 }
2081
2082 /**
2083 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2084 * command
2085 *
2086 * @dev_priv: Pointer to a device private struct.
2087 * @sw_context: The software context being used for this batch.
2088 * @header: Pointer to the command header in the command stream.
2089 */
2090 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2091 struct vmw_sw_context *sw_context,
2092 SVGA3dCmdHeader *header)
2093 {
2094 struct vmw_gb_surface_cmd {
2095 SVGA3dCmdHeader header;
2096 SVGA3dCmdReadbackGBSurface body;
2097 } *cmd;
2098
2099 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2100
2101 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2102 user_surface_converter,
2103 &cmd->body.sid, NULL);
2104 }
2105
2106 /**
2107 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2108 * command
2109 *
2110 * @dev_priv: Pointer to a device private struct.
2111 * @sw_context: The software context being used for this batch.
2112 * @header: Pointer to the command header in the command stream.
2113 */
2114 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2115 struct vmw_sw_context *sw_context,
2116 SVGA3dCmdHeader *header)
2117 {
2118 struct vmw_gb_surface_cmd {
2119 SVGA3dCmdHeader header;
2120 SVGA3dCmdInvalidateGBImage body;
2121 } *cmd;
2122
2123 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2124
2125 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2126 user_surface_converter,
2127 &cmd->body.image.sid, NULL);
2128 }
2129
2130 /**
2131 * vmw_cmd_invalidate_gb_surface - Validate an
2132 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2133 *
2134 * @dev_priv: Pointer to a device private struct.
2135 * @sw_context: The software context being used for this batch.
2136 * @header: Pointer to the command header in the command stream.
2137 */
2138 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2139 struct vmw_sw_context *sw_context,
2140 SVGA3dCmdHeader *header)
2141 {
2142 struct vmw_gb_surface_cmd {
2143 SVGA3dCmdHeader header;
2144 SVGA3dCmdInvalidateGBSurface body;
2145 } *cmd;
2146
2147 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2148
2149 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2150 user_surface_converter,
2151 &cmd->body.sid, NULL);
2152 }
2153
2154
2155 /**
2156 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2157 * command
2158 *
2159 * @dev_priv: Pointer to a device private struct.
2160 * @sw_context: The software context being used for this batch.
2161 * @header: Pointer to the command header in the command stream.
2162 */
2163 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2164 struct vmw_sw_context *sw_context,
2165 SVGA3dCmdHeader *header)
2166 {
2167 struct vmw_shader_define_cmd {
2168 SVGA3dCmdHeader header;
2169 SVGA3dCmdDefineShader body;
2170 } *cmd;
2171 int ret;
2172 size_t size;
2173 struct vmw_resource_val_node *val;
2174
2175 cmd = container_of(header, struct vmw_shader_define_cmd,
2176 header);
2177
2178 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2179 user_context_converter, &cmd->body.cid,
2180 &val);
2181 if (unlikely(ret != 0))
2182 return ret;
2183
2184 if (unlikely(!dev_priv->has_mob))
2185 return 0;
2186
2187 size = cmd->header.size - sizeof(cmd->body);
2188 ret = vmw_compat_shader_add(dev_priv,
2189 vmw_context_res_man(val->res),
2190 cmd->body.shid, cmd + 1,
2191 cmd->body.type, size,
2192 &sw_context->staged_cmd_res);
2193 if (unlikely(ret != 0))
2194 return ret;
2195
2196 return vmw_resource_relocation_add(&sw_context->res_relocations,
2197 NULL,
2198 vmw_ptr_diff(sw_context->buf_start,
2199 &cmd->header.id),
2200 vmw_res_rel_nop);
2201 }
2202
2203 /**
2204 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2205 * command
2206 *
2207 * @dev_priv: Pointer to a device private struct.
2208 * @sw_context: The software context being used for this batch.
2209 * @header: Pointer to the command header in the command stream.
2210 */
2211 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2212 struct vmw_sw_context *sw_context,
2213 SVGA3dCmdHeader *header)
2214 {
2215 struct vmw_shader_destroy_cmd {
2216 SVGA3dCmdHeader header;
2217 SVGA3dCmdDestroyShader body;
2218 } *cmd;
2219 int ret;
2220 struct vmw_resource_val_node *val;
2221
2222 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2223 header);
2224
2225 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2226 user_context_converter, &cmd->body.cid,
2227 &val);
2228 if (unlikely(ret != 0))
2229 return ret;
2230
2231 if (unlikely(!dev_priv->has_mob))
2232 return 0;
2233
2234 ret = vmw_shader_remove(vmw_context_res_man(val->res),
2235 cmd->body.shid,
2236 cmd->body.type,
2237 &sw_context->staged_cmd_res);
2238 if (unlikely(ret != 0))
2239 return ret;
2240
2241 return vmw_resource_relocation_add(&sw_context->res_relocations,
2242 NULL,
2243 vmw_ptr_diff(sw_context->buf_start,
2244 &cmd->header.id),
2245 vmw_res_rel_nop);
2246 }
2247
2248 /**
2249 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2250 * command
2251 *
2252 * @dev_priv: Pointer to a device private struct.
2253 * @sw_context: The software context being used for this batch.
2254 * @header: Pointer to the command header in the command stream.
2255 */
2256 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2257 struct vmw_sw_context *sw_context,
2258 SVGA3dCmdHeader *header)
2259 {
2260 struct vmw_set_shader_cmd {
2261 SVGA3dCmdHeader header;
2262 SVGA3dCmdSetShader body;
2263 } *cmd;
2264 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2265 struct vmw_ctx_bindinfo_shader binding;
2266 struct vmw_resource *res = NULL;
2267 int ret;
2268
2269 cmd = container_of(header, struct vmw_set_shader_cmd,
2270 header);
2271
2272 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2273 DRM_ERROR("Illegal shader type %u.\n",
2274 (unsigned) cmd->body.type);
2275 return -EINVAL;
2276 }
2277
2278 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2279 user_context_converter, &cmd->body.cid,
2280 &ctx_node);
2281 if (unlikely(ret != 0))
2282 return ret;
2283
2284 if (!dev_priv->has_mob)
2285 return 0;
2286
2287 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2288 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2289 cmd->body.shid,
2290 cmd->body.type);
2291
2292 if (!IS_ERR(res)) {
2293 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2294 &cmd->body.shid, res,
2295 &res_node);
2296 vmw_resource_unreference(&res);
2297 if (unlikely(ret != 0))
2298 return ret;
2299 }
2300 }
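	/*
	 * Not found among the context's command-buffer managed (compat)
	 * shaders; fall back to a user-space shader resource lookup.
	 */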
2301
2302 if (!res_node) {
2303 ret = vmw_cmd_res_check(dev_priv, sw_context,
2304 vmw_res_shader,
2305 user_shader_converter,
2306 &cmd->body.shid, &res_node);
2307 if (unlikely(ret != 0))
2308 return ret;
2309 }
2310
2311 binding.bi.ctx = ctx_node->res;
2312 binding.bi.res = res_node ? res_node->res : NULL;
2313 binding.bi.bt = vmw_ctx_binding_shader;
2314 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2315 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2316 binding.shader_slot, 0);
2317 return 0;
2318 }
2319
2320 /**
2321 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2322 * command
2323 *
2324 * @dev_priv: Pointer to a device private struct.
2325 * @sw_context: The software context being used for this batch.
2326 * @header: Pointer to the command header in the command stream.
2327 */
2328 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2329 struct vmw_sw_context *sw_context,
2330 SVGA3dCmdHeader *header)
2331 {
2332 struct vmw_set_shader_const_cmd {
2333 SVGA3dCmdHeader header;
2334 SVGA3dCmdSetShaderConst body;
2335 } *cmd;
2336 int ret;
2337
2338 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2339 header);
2340
2341 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2342 user_context_converter, &cmd->body.cid,
2343 NULL);
2344 if (unlikely(ret != 0))
2345 return ret;
2346
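	/* On guest-backed devices, patch the command id to its GB equivalent. */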
2347 if (dev_priv->has_mob)
2348 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2349
2350 return 0;
2351 }
2352
2353 /**
2354 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2355 * command
2356 *
2357 * @dev_priv: Pointer to a device private struct.
2358 * @sw_context: The software context being used for this batch.
2359 * @header: Pointer to the command header in the command stream.
2360 */
2361 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2362 struct vmw_sw_context *sw_context,
2363 SVGA3dCmdHeader *header)
2364 {
2365 struct vmw_bind_gb_shader_cmd {
2366 SVGA3dCmdHeader header;
2367 SVGA3dCmdBindGBShader body;
2368 } *cmd;
2369
2370 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2371 header);
2372
2373 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2374 user_shader_converter,
2375 &cmd->body.shid, &cmd->body.mobid,
2376 cmd->body.offsetInBytes);
2377 }
2378
2379 /**
2380 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2381 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2382 *
2383 * @dev_priv: Pointer to a device private struct.
2384 * @sw_context: The software context being used for this batch.
2385 * @header: Pointer to the command header in the command stream.
2386 */
2387 static int
2388 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2389 struct vmw_sw_context *sw_context,
2390 SVGA3dCmdHeader *header)
2391 {
2392 struct {
2393 SVGA3dCmdHeader header;
2394 SVGA3dCmdDXSetSingleConstantBuffer body;
2395 } *cmd;
2396 struct vmw_resource_val_node *res_node = NULL;
2397 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2398 struct vmw_ctx_bindinfo_cb binding;
2399 int ret;
2400
2401 if (unlikely(ctx_node == NULL)) {
2402 DRM_ERROR("DX Context not set.\n");
2403 return -EINVAL;
2404 }
2405
2406 cmd = container_of(header, typeof(*cmd), header);
2407 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2408 user_surface_converter,
2409 &cmd->body.sid, &res_node);
2410 if (unlikely(ret != 0))
2411 return ret;
2412
2413 binding.bi.ctx = ctx_node->res;
2414 binding.bi.res = res_node ? res_node->res : NULL;
2415 binding.bi.bt = vmw_ctx_binding_cb;
2416 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2417 binding.offset = cmd->body.offsetInBytes;
2418 binding.size = cmd->body.sizeInBytes;
2419 binding.slot = cmd->body.slot;
2420
2421 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2422 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2423 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2424 (unsigned) cmd->body.type,
2425 (unsigned) binding.slot);
2426 return -EINVAL;
2427 }
2428
2429 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2430 binding.shader_slot, binding.slot);
2431
2432 return 0;
2433 }
2434
2435 /**
2436 * vmw_cmd_dx_set_shader_res - Validate an
2437 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2438 *
2439 * @dev_priv: Pointer to a device private struct.
2440 * @sw_context: The software context being used for this batch.
2441 * @header: Pointer to the command header in the command stream.
2442 */
2443 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2444 struct vmw_sw_context *sw_context,
2445 SVGA3dCmdHeader *header)
2446 {
2447 struct {
2448 SVGA3dCmdHeader header;
2449 SVGA3dCmdDXSetShaderResources body;
2450 } *cmd = container_of(header, typeof(*cmd), header);
2451 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2452 sizeof(SVGA3dShaderResourceViewId);
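	/*
	 * The view ids trail the fixed-size body, so their count follows from
	 * the header size; use 64-bit arithmetic below to avoid overflow.
	 */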
2453
2454 if ((u64) cmd->body.startView + (u64) num_sr_view >
2455 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2456 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2457 DRM_ERROR("Invalid shader binding.\n");
2458 return -EINVAL;
2459 }
2460
2461 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2462 vmw_ctx_binding_sr,
2463 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2464 (void *) &cmd[1], num_sr_view,
2465 cmd->body.startView);
2466 }
2467
2468 /**
2469 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2470 * command
2471 *
2472 * @dev_priv: Pointer to a device private struct.
2473 * @sw_context: The software context being used for this batch.
2474 * @header: Pointer to the command header in the command stream.
2475 */
2476 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2477 struct vmw_sw_context *sw_context,
2478 SVGA3dCmdHeader *header)
2479 {
2480 struct {
2481 SVGA3dCmdHeader header;
2482 SVGA3dCmdDXSetShader body;
2483 } *cmd;
2484 struct vmw_resource *res = NULL;
2485 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2486 struct vmw_ctx_bindinfo_shader binding;
2487 int ret = 0;
2488
2489 if (unlikely(ctx_node == NULL)) {
2490 DRM_ERROR("DX Context not set.\n");
2491 return -EINVAL;
2492 }
2493
2494 cmd = container_of(header, typeof(*cmd), header);
2495
2496 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2497 DRM_ERROR("Illegal shader type %u.\n",
2498 (unsigned) cmd->body.type);
2499 return -EINVAL;
2500 }
2501
2502 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2503 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2504 if (IS_ERR(res)) {
2505 DRM_ERROR("Could not find shader for binding.\n");
2506 return PTR_ERR(res);
2507 }
2508
2509 ret = vmw_resource_val_add(sw_context, res, NULL);
2510 if (ret)
2511 goto out_unref;
2512 }
2513
2514 binding.bi.ctx = ctx_node->res;
2515 binding.bi.res = res;
2516 binding.bi.bt = vmw_ctx_binding_dx_shader;
2517 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2518
2519 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2520 binding.shader_slot, 0);
2521 out_unref:
2522 if (res)
2523 vmw_resource_unreference(&res);
2524
2525 return ret;
2526 }
2527
2528 /**
2529  * vmw_cmd_dx_set_vertex_buffers - Validate an
2530 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2531 *
2532 * @dev_priv: Pointer to a device private struct.
2533 * @sw_context: The software context being used for this batch.
2534 * @header: Pointer to the command header in the command stream.
2535 */
2536 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2537 struct vmw_sw_context *sw_context,
2538 SVGA3dCmdHeader *header)
2539 {
2540 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2541 struct vmw_ctx_bindinfo_vb binding;
2542 struct vmw_resource_val_node *res_node;
2543 struct {
2544 SVGA3dCmdHeader header;
2545 SVGA3dCmdDXSetVertexBuffers body;
2546 SVGA3dVertexBuffer buf[];
2547 } *cmd;
2548 int i, ret, num;
2549
2550 if (unlikely(ctx_node == NULL)) {
2551 DRM_ERROR("DX Context not set.\n");
2552 return -EINVAL;
2553 }
2554
2555 cmd = container_of(header, typeof(*cmd), header);
2556 num = (cmd->header.size - sizeof(cmd->body)) /
2557 sizeof(SVGA3dVertexBuffer);
2558 if ((u64)num + (u64)cmd->body.startBuffer >
2559 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2560 DRM_ERROR("Invalid number of vertex buffers.\n");
2561 return -EINVAL;
2562 }
2563
2564 for (i = 0; i < num; i++) {
2565 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2566 user_surface_converter,
2567 &cmd->buf[i].sid, &res_node);
2568 if (unlikely(ret != 0))
2569 return ret;
2570
2571 binding.bi.ctx = ctx_node->res;
2572 binding.bi.bt = vmw_ctx_binding_vb;
2573 binding.bi.res = ((res_node) ? res_node->res : NULL);
2574 binding.offset = cmd->buf[i].offset;
2575 binding.stride = cmd->buf[i].stride;
2576 binding.slot = i + cmd->body.startBuffer;
2577
2578 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2579 0, binding.slot);
2580 }
2581
2582 return 0;
2583 }
2584
2585 /**
2586  * vmw_cmd_dx_set_index_buffer - Validate an
2587  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2588 *
2589 * @dev_priv: Pointer to a device private struct.
2590 * @sw_context: The software context being used for this batch.
2591 * @header: Pointer to the command header in the command stream.
2592 */
2593 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2594 struct vmw_sw_context *sw_context,
2595 SVGA3dCmdHeader *header)
2596 {
2597 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2598 struct vmw_ctx_bindinfo_ib binding;
2599 struct vmw_resource_val_node *res_node;
2600 struct {
2601 SVGA3dCmdHeader header;
2602 SVGA3dCmdDXSetIndexBuffer body;
2603 } *cmd;
2604 int ret;
2605
2606 if (unlikely(ctx_node == NULL)) {
2607 DRM_ERROR("DX Context not set.\n");
2608 return -EINVAL;
2609 }
2610
2611 cmd = container_of(header, typeof(*cmd), header);
2612 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2613 user_surface_converter,
2614 &cmd->body.sid, &res_node);
2615 if (unlikely(ret != 0))
2616 return ret;
2617
2618 binding.bi.ctx = ctx_node->res;
2619 binding.bi.res = ((res_node) ? res_node->res : NULL);
2620 binding.bi.bt = vmw_ctx_binding_ib;
2621 binding.offset = cmd->body.offset;
2622 binding.format = cmd->body.format;
2623
2624 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2625
2626 return 0;
2627 }
2628
2629 /**
2630  * vmw_cmd_dx_set_rendertargets - Validate an
2631 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2632 *
2633 * @dev_priv: Pointer to a device private struct.
2634 * @sw_context: The software context being used for this batch.
2635 * @header: Pointer to the command header in the command stream.
2636 */
2637 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2638 struct vmw_sw_context *sw_context,
2639 SVGA3dCmdHeader *header)
2640 {
2641 struct {
2642 SVGA3dCmdHeader header;
2643 SVGA3dCmdDXSetRenderTargets body;
2644 } *cmd = container_of(header, typeof(*cmd), header);
2645 int ret;
2646 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2647 sizeof(SVGA3dRenderTargetViewId);
2648
2649 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2650 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2651 return -EINVAL;
2652 }
2653
2654 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2655 vmw_ctx_binding_ds, 0,
2656 &cmd->body.depthStencilViewId, 1, 0);
2657 if (ret)
2658 return ret;
2659
2660 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2661 vmw_ctx_binding_dx_rt, 0,
2662 (void *)&cmd[1], num_rt_view, 0);
2663 }
2664
2665 /**
2666 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2667 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2668 *
2669 * @dev_priv: Pointer to a device private struct.
2670 * @sw_context: The software context being used for this batch.
2671 * @header: Pointer to the command header in the command stream.
2672 */
2673 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2674 struct vmw_sw_context *sw_context,
2675 SVGA3dCmdHeader *header)
2676 {
2677 struct {
2678 SVGA3dCmdHeader header;
2679 SVGA3dCmdDXClearRenderTargetView body;
2680 } *cmd = container_of(header, typeof(*cmd), header);
2681
2682 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2683 cmd->body.renderTargetViewId);
2684 }
2685
2686 /**
2687  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2688 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2689 *
2690 * @dev_priv: Pointer to a device private struct.
2691 * @sw_context: The software context being used for this batch.
2692 * @header: Pointer to the command header in the command stream.
2693 */
2694 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2695 struct vmw_sw_context *sw_context,
2696 SVGA3dCmdHeader *header)
2697 {
2698 struct {
2699 SVGA3dCmdHeader header;
2700 SVGA3dCmdDXClearDepthStencilView body;
2701 } *cmd = container_of(header, typeof(*cmd), header);
2702
2703 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2704 cmd->body.depthStencilViewId);
2705 }
2706
2707 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2708 struct vmw_sw_context *sw_context,
2709 SVGA3dCmdHeader *header)
2710 {
2711 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2712 struct vmw_resource_val_node *srf_node;
2713 struct vmw_resource *res;
2714 enum vmw_view_type view_type;
2715 int ret;
2716 /*
2717 * This is based on the fact that all affected define commands have
2718 * the same initial command body layout.
2719 */
2720 struct {
2721 SVGA3dCmdHeader header;
2722 uint32 defined_id;
2723 uint32 sid;
2724 } *cmd;
2725
2726 if (unlikely(ctx_node == NULL)) {
2727 DRM_ERROR("DX Context not set.\n");
2728 return -EINVAL;
2729 }
2730
2731 view_type = vmw_view_cmd_to_type(header->id);
2732 if (view_type == vmw_view_max)
2733 return -EINVAL;
2734 cmd = container_of(header, typeof(*cmd), header);
2735 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2736 user_surface_converter,
2737 &cmd->sid, &srf_node);
2738 if (unlikely(ret != 0))
2739 return ret;
2740
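	/*
	 * Notify the context's view cotable so that it can be resized to
	 * accommodate the new view id before the command reaches the device.
	 */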
2741 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2742 ret = vmw_cotable_notify(res, cmd->defined_id);
2743 vmw_resource_unreference(&res);
2744 if (unlikely(ret != 0))
2745 return ret;
2746
2747 return vmw_view_add(sw_context->man,
2748 ctx_node->res,
2749 srf_node->res,
2750 view_type,
2751 cmd->defined_id,
2752 header,
2753 header->size + sizeof(*header),
2754 &sw_context->staged_cmd_res);
2755 }
2756
2757 /**
2758 * vmw_cmd_dx_set_so_targets - Validate an
2759 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2760 *
2761 * @dev_priv: Pointer to a device private struct.
2762 * @sw_context: The software context being used for this batch.
2763 * @header: Pointer to the command header in the command stream.
2764 */
2765 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2766 struct vmw_sw_context *sw_context,
2767 SVGA3dCmdHeader *header)
2768 {
2769 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2770 struct vmw_ctx_bindinfo_so binding;
2771 struct vmw_resource_val_node *res_node;
2772 struct {
2773 SVGA3dCmdHeader header;
2774 SVGA3dCmdDXSetSOTargets body;
2775 SVGA3dSoTarget targets[];
2776 } *cmd;
2777 int i, ret, num;
2778
2779 if (unlikely(ctx_node == NULL)) {
2780 DRM_ERROR("DX Context not set.\n");
2781 return -EINVAL;
2782 }
2783
2784 cmd = container_of(header, typeof(*cmd), header);
2785 num = (cmd->header.size - sizeof(cmd->body)) /
2786 sizeof(SVGA3dSoTarget);
2787
2788 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2789 DRM_ERROR("Invalid DX SO binding.\n");
2790 return -EINVAL;
2791 }
2792
2793 for (i = 0; i < num; i++) {
2794 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2795 user_surface_converter,
2796 &cmd->targets[i].sid, &res_node);
2797 if (unlikely(ret != 0))
2798 return ret;
2799
2800 binding.bi.ctx = ctx_node->res;
2801 binding.bi.res = ((res_node) ? res_node->res : NULL);
2802 		binding.bi.bt = vmw_ctx_binding_so;
2803 binding.offset = cmd->targets[i].offset;
2804 binding.size = cmd->targets[i].sizeInBytes;
2805 binding.slot = i;
2806
2807 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2808 0, binding.slot);
2809 }
2810
2811 return 0;
2812 }
2813
2814 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2815 struct vmw_sw_context *sw_context,
2816 SVGA3dCmdHeader *header)
2817 {
2818 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2819 struct vmw_resource *res;
2820 /*
2821 * This is based on the fact that all affected define commands have
2822 * the same initial command body layout.
2823 */
2824 struct {
2825 SVGA3dCmdHeader header;
2826 uint32 defined_id;
2827 } *cmd;
2828 enum vmw_so_type so_type;
2829 int ret;
2830
2831 if (unlikely(ctx_node == NULL)) {
2832 DRM_ERROR("DX Context not set.\n");
2833 return -EINVAL;
2834 }
2835
2836 so_type = vmw_so_cmd_to_type(header->id);
2837 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2838 cmd = container_of(header, typeof(*cmd), header);
2839 ret = vmw_cotable_notify(res, cmd->defined_id);
2840 vmw_resource_unreference(&res);
2841
2842 return ret;
2843 }
2844
2845 /**
2846 * vmw_cmd_dx_check_subresource - Validate an
2847 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2848 *
2849 * @dev_priv: Pointer to a device private struct.
2850 * @sw_context: The software context being used for this batch.
2851 * @header: Pointer to the command header in the command stream.
2852 */
2853 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2854 struct vmw_sw_context *sw_context,
2855 SVGA3dCmdHeader *header)
2856 {
2857 struct {
2858 SVGA3dCmdHeader header;
2859 union {
2860 SVGA3dCmdDXReadbackSubResource r_body;
2861 SVGA3dCmdDXInvalidateSubResource i_body;
2862 SVGA3dCmdDXUpdateSubResource u_body;
2863 SVGA3dSurfaceId sid;
2864 };
2865 } *cmd;
2866
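	/* All three bodies start with the surface id; verify the aliasing. */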
2867 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2868 offsetof(typeof(*cmd), sid));
2869 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2870 offsetof(typeof(*cmd), sid));
2871 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2872 offsetof(typeof(*cmd), sid));
2873
2874 cmd = container_of(header, typeof(*cmd), header);
2875
2876 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2877 user_surface_converter,
2878 &cmd->sid, NULL);
2879 }
2880
2881 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2882 struct vmw_sw_context *sw_context,
2883 SVGA3dCmdHeader *header)
2884 {
2885 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2886
2887 if (unlikely(ctx_node == NULL)) {
2888 DRM_ERROR("DX Context not set.\n");
2889 return -EINVAL;
2890 }
2891
2892 return 0;
2893 }
2894
2895 /**
2896 * vmw_cmd_dx_view_remove - validate a view remove command and
2897 * schedule the view resource for removal.
2898 *
2899 * @dev_priv: Pointer to a device private struct.
2900 * @sw_context: The software context being used for this batch.
2901 * @header: Pointer to the command header in the command stream.
2902 *
2903 * Check that the view exists, and if it was not created using this
2904 * command batch, conditionally make this command a NOP.
2905 */
2906 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2907 struct vmw_sw_context *sw_context,
2908 SVGA3dCmdHeader *header)
2909 {
2910 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2911 struct {
2912 SVGA3dCmdHeader header;
2913 union vmw_view_destroy body;
2914 } *cmd = container_of(header, typeof(*cmd), header);
2915 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2916 struct vmw_resource *view;
2917 int ret;
2918
2919 if (!ctx_node) {
2920 DRM_ERROR("DX Context not set.\n");
2921 return -EINVAL;
2922 }
2923
2924 ret = vmw_view_remove(sw_context->man,
2925 cmd->body.view_id, view_type,
2926 &sw_context->staged_cmd_res,
2927 &view);
2928 if (ret || !view)
2929 return ret;
2930
2931 /*
2932 * If the view wasn't created during this command batch, it might
2933 * have been removed due to a context swapout, so add a
2934 * relocation to conditionally make this command a NOP to avoid
2935 * device errors.
2936 */
2937 return vmw_resource_relocation_add(&sw_context->res_relocations,
2938 view,
2939 vmw_ptr_diff(sw_context->buf_start,
2940 &cmd->header.id),
2941 vmw_res_rel_cond_nop);
2942 }
2943
2944 /**
2945 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2946 * command
2947 *
2948 * @dev_priv: Pointer to a device private struct.
2949 * @sw_context: The software context being used for this batch.
2950 * @header: Pointer to the command header in the command stream.
2951 */
2952 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2953 struct vmw_sw_context *sw_context,
2954 SVGA3dCmdHeader *header)
2955 {
2956 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2957 struct vmw_resource *res;
2958 struct {
2959 SVGA3dCmdHeader header;
2960 SVGA3dCmdDXDefineShader body;
2961 } *cmd = container_of(header, typeof(*cmd), header);
2962 int ret;
2963
2964 if (!ctx_node) {
2965 DRM_ERROR("DX Context not set.\n");
2966 return -EINVAL;
2967 }
2968
2969 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2970 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2971 vmw_resource_unreference(&res);
2972 if (ret)
2973 return ret;
2974
2975 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2976 cmd->body.shaderId, cmd->body.type,
2977 &sw_context->staged_cmd_res);
2978 }
2979
2980 /**
2981 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2982 * command
2983 *
2984 * @dev_priv: Pointer to a device private struct.
2985 * @sw_context: The software context being used for this batch.
2986 * @header: Pointer to the command header in the command stream.
2987 */
2988 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2989 struct vmw_sw_context *sw_context,
2990 SVGA3dCmdHeader *header)
2991 {
2992 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2993 struct {
2994 SVGA3dCmdHeader header;
2995 SVGA3dCmdDXDestroyShader body;
2996 } *cmd = container_of(header, typeof(*cmd), header);
2997 int ret;
2998
2999 if (!ctx_node) {
3000 DRM_ERROR("DX Context not set.\n");
3001 return -EINVAL;
3002 }
3003
3004 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3005 &sw_context->staged_cmd_res);
3006 if (ret)
3007 DRM_ERROR("Could not find shader to remove.\n");
3008
3009 return ret;
3010 }
3011
3012 /**
3013 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3014 * command
3015 *
3016 * @dev_priv: Pointer to a device private struct.
3017 * @sw_context: The software context being used for this batch.
3018 * @header: Pointer to the command header in the command stream.
3019 */
3020 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3021 struct vmw_sw_context *sw_context,
3022 SVGA3dCmdHeader *header)
3023 {
3024 struct vmw_resource_val_node *ctx_node;
3025 struct vmw_resource_val_node *res_node;
3026 struct vmw_resource *res;
3027 struct {
3028 SVGA3dCmdHeader header;
3029 SVGA3dCmdDXBindShader body;
3030 } *cmd = container_of(header, typeof(*cmd), header);
3031 int ret;
3032
3033 if (cmd->body.cid != SVGA3D_INVALID_ID) {
3034 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3035 user_context_converter,
3036 &cmd->body.cid, &ctx_node);
3037 if (ret)
3038 return ret;
3039 } else {
3040 ctx_node = sw_context->dx_ctx_node;
3041 if (!ctx_node) {
3042 DRM_ERROR("DX Context not set.\n");
3043 return -EINVAL;
3044 }
3045 }
3046
3047 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3048 cmd->body.shid, 0);
3049 if (IS_ERR(res)) {
3050 DRM_ERROR("Could not find shader to bind.\n");
3051 return PTR_ERR(res);
3052 }
3053
3054 ret = vmw_resource_val_add(sw_context, res, &res_node);
3055 if (ret) {
3056 DRM_ERROR("Error creating resource validation node.\n");
3057 goto out_unref;
3058 }
3059
3060
3061 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3062 &cmd->body.mobid,
3063 cmd->body.offsetInBytes);
3064 out_unref:
3065 vmw_resource_unreference(&res);
3066
3067 return ret;
3068 }
3069
3070 /**
3071 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3072 *
3073 * @dev_priv: Pointer to a device private struct.
3074 * @sw_context: The software context being used for this batch.
3075 * @header: Pointer to the command header in the command stream.
3076 */
3077 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3078 struct vmw_sw_context *sw_context,
3079 SVGA3dCmdHeader *header)
3080 {
3081 struct {
3082 SVGA3dCmdHeader header;
3083 SVGA3dCmdDXGenMips body;
3084 } *cmd = container_of(header, typeof(*cmd), header);
3085
3086 return vmw_view_id_val_add(sw_context, vmw_view_sr,
3087 cmd->body.shaderResourceViewId);
3088 }
3089
3090 /**
3091 * vmw_cmd_dx_transfer_from_buffer -
3092 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3093 *
3094 * @dev_priv: Pointer to a device private struct.
3095 * @sw_context: The software context being used for this batch.
3096 * @header: Pointer to the command header in the command stream.
3097 */
3098 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3099 struct vmw_sw_context *sw_context,
3100 SVGA3dCmdHeader *header)
3101 {
3102 struct {
3103 SVGA3dCmdHeader header;
3104 SVGA3dCmdDXTransferFromBuffer body;
3105 } *cmd = container_of(header, typeof(*cmd), header);
3106 int ret;
3107
3108 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3109 user_surface_converter,
3110 &cmd->body.srcSid, NULL);
3111 if (ret != 0)
3112 return ret;
3113
3114 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3115 user_surface_converter,
3116 &cmd->body.destSid, NULL);
3117 }
3118
3119 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3120 struct vmw_sw_context *sw_context,
3121 void *buf, uint32_t *size)
3122 {
3123 uint32_t size_remaining = *size;
3124 uint32_t cmd_id;
3125
3126 cmd_id = ((uint32_t *)buf)[0];
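	/* Non-3D FIFO commands are fixed-size; derive the size from the id. */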
3127 switch (cmd_id) {
3128 case SVGA_CMD_UPDATE:
3129 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3130 break;
3131 case SVGA_CMD_DEFINE_GMRFB:
3132 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3133 break;
3134 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3135 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3136 break;
3137 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3138 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3139 break;
3140 default:
3141 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3142 return -EINVAL;
3143 }
3144
3145 if (*size > size_remaining) {
3146 DRM_ERROR("Invalid SVGA command (size mismatch):"
3147 " %u.\n", cmd_id);
3148 return -EINVAL;
3149 }
3150
3151 if (unlikely(!sw_context->kernel)) {
3152 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3153 return -EPERM;
3154 }
3155
3156 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3157 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3158
3159 return 0;
3160 }
3161
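/*
 * Per-command dispatch table. Each VMW_CMD_DEF entry supplies the validator
 * function followed by three flags (per the VMW_CMD_DEF definition earlier
 * in this file: user_allow, gb_disable and gb_enable).
 */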
3162 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3163 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3164 false, false, false),
3165 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3166 false, false, false),
3167 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3168 true, false, false),
3169 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3170 true, false, false),
3171 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3172 true, false, false),
3173 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3174 false, false, false),
3175 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3176 false, false, false),
3177 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3178 true, false, false),
3179 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3180 true, false, false),
3181 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3182 true, false, false),
3183 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3184 &vmw_cmd_set_render_target_check, true, false, false),
3185 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3186 true, false, false),
3187 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3188 true, false, false),
3189 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3190 true, false, false),
3191 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3192 true, false, false),
3193 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3194 true, false, false),
3195 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3196 true, false, false),
3197 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3198 true, false, false),
3199 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3200 false, false, false),
3201 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3202 true, false, false),
3203 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3204 true, false, false),
3205 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3206 true, false, false),
3207 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3208 true, false, false),
3209 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3210 true, false, false),
3211 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3212 true, false, false),
3213 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3214 true, false, false),
3215 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3216 true, false, false),
3217 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3218 true, false, false),
3219 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3220 true, false, false),
3221 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3222 &vmw_cmd_blt_surf_screen_check, false, false, false),
3223 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3224 false, false, false),
3225 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3226 false, false, false),
3227 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3228 false, false, false),
3229 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3230 false, false, false),
3231 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3232 false, false, false),
3233 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
};

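/**
 * vmw_cmd_check - Verify a single command in the command stream.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream.
 * On output, the size of the command that was checked.
 *
 * Looks the command up in @vmw_cmd_entries, rejects commands that are
 * privileged, deprecated or unsupported by the virtual hardware, and
 * otherwise dispatches to the per-command verifier function.
 */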
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

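/**
 * vmw_cmd_check_all - Verify all commands in a command batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Walks the batch one command at a time using vmw_cmd_check(). Returns
 * 0 on success, or the error code of the first failing command check.
 */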
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

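/**
 * vmw_free_relocations - Discard the buffer object relocation list.
 *
 * @sw_context: The software context holding the relocation list.
 */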
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

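/**
 * vmw_apply_relocations - Patch buffer object locations into the batch.
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each relocation, writes the final GMR id, MOB id or VRAM offset of
 * the now-validated buffer object into the command stream location that
 * referenced it, then discards the relocation list.
 */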
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context used for this command submission.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

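/**
 * vmw_clear_validations - Drop validation references and hash entries.
 *
 * @sw_context: The software context used for this command submission.
 *
 * Releases the buffer object references held by the validation list and
 * removes the hash table entries for both buffer objects and resources.
 */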
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

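/**
 * vmw_validate_single_buffer - Validate (place) a single buffer object.
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether to perform waits interruptibly.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers are left where they are. MOBs are validated into MOB
 * memory; other buffers are placed in VRAM or GMR memory, falling back
 * to evicting VRAM contents if necessary.
 */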
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

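/**
 * vmw_validate_buffers - Validate all buffer objects on the validation list.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context holding the validation list.
 */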
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

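/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough.
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: Minimum required size in bytes.
 *
 * Grows the bounce buffer in roughly 50%, page-aligned steps until it
 * can hold @size bytes. The old contents are discarded.
 */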
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: DRM file-private structure identifying the caller. Must be
 * non-NULL if @p_handle is non-NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Returns a pointer to the created fence object.
 * @p_handle: If non-NULL, returns a user-space handle for the fence.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * A user-space handle is created only if @p_handle is non-NULL.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

3840
3841 /**
3842 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3843 * the fifo.
3844 *
3845 * @dev_priv: Pointer to a device private structure.
3846 * @kernel_commands: Pointer to the unpatched command batch.
3847 * @command_size: Size of the unpatched command batch.
3848 * @sw_context: Structure holding the relocation lists.
3849 *
3850 * Side effects: If this function returns 0, then the command batch
3851 * pointed to by @kernel_commands will have been modified.
3852 */
vmw_execbuf_submit_fifo(struct vmw_private * dev_priv,void * kernel_commands,u32 command_size,struct vmw_sw_context * sw_context)3853 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3854 void *kernel_commands,
3855 u32 command_size,
3856 struct vmw_sw_context *sw_context)
3857 {
3858 void *cmd;
3859
3860 if (sw_context->dx_ctx_node)
3861 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3862 sw_context->dx_ctx_node->res->id);
3863 else
3864 cmd = vmw_fifo_reserve(dev_priv, command_size);
3865 if (!cmd) {
3866 DRM_ERROR("Failed reserving fifo space for commands.\n");
3867 return -ENOMEM;
3868 }
3869
3870 vmw_apply_relocations(sw_context);
3871 memcpy(cmd, kernel_commands, command_size);
3872 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3873 vmw_resource_relocations_free(&sw_context->res_relocations);
3874 vmw_fifo_commit(dev_priv, command_size);
3875
3876 return 0;
3877 }
3878
3879 /**
3880 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3881 * the command buffer manager.
3882 *
3883 * @dev_priv: Pointer to a device private structure.
3884 * @header: Opaque handle to the command buffer allocation.
3885 * @command_size: Size of the unpatched command batch.
3886 * @sw_context: Structure holding the relocation lists.
3887 *
3888 * Side effects: If this function returns 0, then the command buffer
3889 * represented by @header will have been modified.
3890 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	/*
	 * vmw_cmdbuf_reserve() can return an ERR_PTR-encoded error;
	 * bail out before patching relocations into it.
	 */
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

3908
3909 /**
3910 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3911 * submission using a command buffer.
3912 *
3913 * @dev_priv: Pointer to a device private structure.
3914 * @user_commands: User-space pointer to the commands to be submitted.
3915 * @command_size: Size of the unpatched command batch.
3916 * @header: Out parameter returning the opaque pointer to the command buffer.
3917 *
3918 * This function checks whether we can use the command buffer manager for
3919 * submission and if so, creates a command buffer of suitable size and
3920 * copies the user data into that buffer.
3921 *
3922 * On successful return, the function returns a pointer to the data in the
3923 * command buffer and *@header is set to non-NULL.
3924 * If command buffers could not be used, the function will return the value
3925 * of @kernel_commands on function call. That value may be NULL. In that case,
3926 * the value of *@header will be set to NULL.
3927 * If an error is encountered, the function will return a pointer error value.
3928 * If the function is interrupted by a signal while sleeping, it will return
3929 * -ERESTARTSYS casted to a pointer error value.
3930 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

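/**
 * vmw_execbuf_tie_context - Look up and reserve a DX context for the batch.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command submission.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if
 * the batch is not tied to a DX context.
 */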
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
3984 DRM_ERROR("Could not find or user DX context 0x%08x.\n",
3985 (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

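/**
 * vmw_execbuf_process - Validate and submit a command batch.
 *
 * @file_priv: DRM file-private structure identifying the caller.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is used instead.
 * @kernel_commands: Kernel pointer to the command batch, or NULL if the
 * batch should be copied from @user_commands.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, throttle the caller until the fifo lag drops
 * below this value, in microseconds.
 * @dx_context_handle: Handle of the DX context to tie the batch to, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to copy fence information to, or NULL.
 * @out_fence: If non-NULL, the fence created for the batch is returned
 * here with a reference held, instead of being unreferenced.
 *
 * This is the main execbuf entry point: it verifies the batch, reserves
 * and validates all referenced resources and buffer objects, patches in
 * relocations, submits the batch to the device and fences the result.
 */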
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

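/**
 * vmw_execbuf_ioctl - ioctl entry point for command submission.
 *
 * @dev: Pointer to the DRM device.
 * @data: User-space address of the ioctl argument.
 * @file_priv: DRM file-private structure of the caller.
 * @size: Size of the ioctl argument as declared by user-space.
 *
 * Copies in a struct drm_vmw_execbuf_arg, handling the versioned argument
 * layout for backwards compatibility, and hands the batch to
 * vmw_execbuf_process().
 */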
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */

	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}