/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs
 * fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Verifier callback for the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Stringified name of the command, as generated by VMW_CMD_DEF.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable), #_cmd}

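/*
 * Illustrative expansion (hypothetical table entry): writing
 *
 *      VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *                  true, false, false)
 *
 * produces the designated initializer
 *
 *      [SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
 *              {&vmw_cmd_surface_copy_check, true, false, false,
 *               "SVGA_3D_CMD_SURFACE_COPY"}
 *
 * i.e. the command id doubles as the table index, and #_cmd stringifies
 * the command name for diagnostics.
 */
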
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(!node)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(!rel)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}

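/**
 * vmw_cmd_invalid - Verifier callback that rejects a command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Unconditionally returns -EINVAL.
 */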
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

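/**
 * vmw_cmd_ok - Verifier callback that accepts a command without any
 * further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Unconditionally returns 0.
 */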
static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         vmw_ptr_diff(sw_context->buf_start, id_loc),
                         vmw_res_rel_normal);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

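/**
 * vmw_cmd_set_render_target_check - validate a SVGA_3D_CMD_SETRENDERTARGET
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context and render-target surface and, on guest-backed
 * devices, records the render-target binding in the context's staged
 * binding state.
 */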
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

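/**
 * vmw_cmd_surface_copy_check - validate a SVGA_3D_CMD_SURFACE_COPY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */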
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

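/**
 * vmw_cmd_buffer_copy_check - validate a SVGA_3D_CMD_DX_BUFFER_COPY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */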
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

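/**
 * vmw_cmd_pred_copy_check - validate a SVGA_3D_CMD_DX_PRED_COPY_REGION
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */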
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

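/**
 * vmw_cmd_stretch_blt_check - validate a SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */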
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

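/**
 * vmw_cmd_blt_surf_screen_check - validate a
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */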
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

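/**
 * vmw_cmd_present_check - validate a SVGA_3D_CMD_PRESENT command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */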
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_dx_define_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineQuery q;
        } *cmd;
        int ret;
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *cotable_res;

        if (ctx_node == NULL) {
                DRM_ERROR("DX Context not set for query.\n");
                return -EINVAL;
        }

        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

        if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
        vmw_resource_unreference(&cotable_res);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dx_bind_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindQuery q;
        } *cmd;
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

        /*
         * Look up the buffer pointed to by q.mobid, put it on the relocation
         * list so its kernel mode MOB ID can be filled in later
         */
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
                                    &vmw_bo);

        if (ret != 0)
                return ret;

        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_begin_gb_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginGBQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

        if (unlikely(dev_priv->has_mob)) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdBeginGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}
1662
1663 /**
1664 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1665 *
1666 * @dev_priv: Pointer to a device private struct.
1667 * @sw_context: The software context used for this command submission.
1668 * @header: Pointer to the command header in the command stream.
1669 */
vmw_cmd_wait_query(struct vmw_private * dev_priv,struct vmw_sw_context * sw_context,SVGA3dCmdHeader * header)1670 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1671 struct vmw_sw_context *sw_context,
1672 SVGA3dCmdHeader *header)
1673 {
1674 struct vmw_dma_buffer *vmw_bo;
1675 struct vmw_query_cmd {
1676 SVGA3dCmdHeader header;
1677 SVGA3dCmdWaitForQuery q;
1678 } *cmd;
1679 int ret;
1680
1681 cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

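/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */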
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

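	/*
	 * Clamp the suffix's maximumOffset to the remaining BO size so the
	 * DMA can never read or write past the end of the buffer object;
	 * out-of-range values are truncated rather than rejected.
	 */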
	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

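/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */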
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

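/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */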
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

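/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 */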
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

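	/*
	 * The shader is now tracked as a guest-backed resource, so patch
	 * the legacy define command out of the device stream by replacing
	 * it with a NOP through an unconditional relocation.
	 */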
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

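	/*
	 * The shader has been scheduled for removal above; NOP out the
	 * legacy destroy command as well.
	 */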
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

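	/*
	 * Look the shader up among the context-managed (compat) shaders
	 * first; if it isn't found there, fall back to treating the id
	 * as a user-space shader resource handle below.
	 */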
	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validate an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

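/**
 * vmw_cmd_dx_view_define - Validate a DX view define command, notifying
 * the relevant cotable and adding the view to the set managed by this
 * command batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */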
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		DRM_ERROR("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

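/**
 * vmw_cmd_dx_so_define - Validate a DX state-object define command by
 * notifying the cotable that corresponds to the command id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */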
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

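/**
 * vmw_cmd_dx_cid_check - Verify that a DX context has been set for this
 * command batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */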
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate an
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

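/**
 * vmw_cmd_check_not_3d - Validate a 2D SVGA FIFO command and determine
 * its size.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream;
 * on successful return, the size of the command at @buf.
 */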
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3389 &vmw_cmd_dx_cid_check, true, false, true),
3390 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3391 true, false, true),
3392 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3393 true, false, true),
3394 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3395 true, false, true),
3396 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3397 &vmw_cmd_dx_cid_check, true, false, true),
3398 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3399 true, false, true),
3400 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3401 true, false, true),
3402 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3403 true, false, true),
3404 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3405 true, false, true),
3406 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3407 true, false, true),
3408 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3409 true, false, true),
3410 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3411 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3412 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3413 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3414 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3415 true, false, true),
3416 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3417 true, false, true),
3418 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3419 &vmw_cmd_dx_check_subresource, true, false, true),
3420 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3421 &vmw_cmd_dx_check_subresource, true, false, true),
3422 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3423 &vmw_cmd_dx_check_subresource, true, false, true),
3424 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3425 &vmw_cmd_dx_view_define, true, false, true),
3426 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3427 &vmw_cmd_dx_view_remove, true, false, true),
3428 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3429 &vmw_cmd_dx_view_define, true, false, true),
3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3431 &vmw_cmd_dx_view_remove, true, false, true),
3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3433 &vmw_cmd_dx_view_define, true, false, true),
3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3435 &vmw_cmd_dx_view_remove, true, false, true),
3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3437 &vmw_cmd_dx_so_define, true, false, true),
3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3439 &vmw_cmd_dx_cid_check, true, false, true),
3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3441 &vmw_cmd_dx_so_define, true, false, true),
3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3443 &vmw_cmd_dx_cid_check, true, false, true),
3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3445 &vmw_cmd_dx_so_define, true, false, true),
3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3447 &vmw_cmd_dx_cid_check, true, false, true),
3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3449 &vmw_cmd_dx_so_define, true, false, true),
3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3451 &vmw_cmd_dx_cid_check, true, false, true),
3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3453 &vmw_cmd_dx_so_define, true, false, true),
3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3455 &vmw_cmd_dx_cid_check, true, false, true),
3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3457 &vmw_cmd_dx_define_shader, true, false, true),
3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3459 &vmw_cmd_dx_destroy_shader, true, false, true),
3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3461 &vmw_cmd_dx_bind_shader, true, false, true),
3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3463 &vmw_cmd_dx_so_define, true, false, true),
3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3465 &vmw_cmd_dx_cid_check, true, false, true),
3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3467 true, false, true),
3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3469 &vmw_cmd_dx_set_so_targets, true, false, true),
3470 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3471 &vmw_cmd_dx_cid_check, true, false, true),
3472 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3473 &vmw_cmd_dx_cid_check, true, false, true),
3474 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3475 &vmw_cmd_buffer_copy_check, true, false, true),
3476 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3477 &vmw_cmd_pred_copy_check, true, false, true),
3478 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3479 &vmw_cmd_dx_transfer_from_buffer,
3480 true, false, true),
3481 };
3482
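/**
 * vmw_cmd_describe - Look up the size and a printable name of a command
 *
 * @buf: Pointer to the command.
 * @size: Assigned the total size of the command, including any header.
 * @cmd: Assigned a pointer to a string naming the command.
 *
 * Returns true if the command was recognized, false otherwise.
 */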
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

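/**
 * vmw_cmd_check - Verify a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command.
 * @size: On input, the number of bytes remaining in the command stream.
 * On successful return, the total size of the checked command.
 *
 * Looks the command up in the command entry table and performs bounds,
 * privilege and guest-backed capability checks before calling the
 * per-command verifier function.
 */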
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

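/**
 * vmw_cmd_check_all - Verify all commands in a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Walks the batch, calling vmw_cmd_check() on each command until the
 * batch is exhausted or a command fails verification.
 */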
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

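/**
 * vmw_free_relocations - Discard all buffer relocations of a software context
 *
 * @sw_context: The software context holding the relocations.
 */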
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

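/**
 * vmw_apply_relocations - Patch buffer object placements into the batch
 *
 * @sw_context: The software context holding the relocations.
 *
 * For each relocation, writes the validated buffer object's placement
 * (VRAM offset, GMR id or MOB id) into the command batch.
 */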
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context used for this command submission.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

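/**
 * vmw_clear_validations - Drop validation node references and hash entries
 *
 * @sw_context: The software context used for this command submission.
 */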
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

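/**
 * vmw_validate_single_buffer - Validate a single buffer object for submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether to sleep interruptibly while waiting.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 */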
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

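/**
 * vmw_validate_buffers - Validate all buffer objects on the validation list
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context holding the validation list.
 */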
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

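/**
 * vmw_resize_cmd_bounce - Grow the command bounce buffer if needed
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: The minimum required bounce buffer size.
 *
 * Grows the buffer in page-aligned steps of roughly 1.5x. The old
 * contents are not preserved.
 */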
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created; otherwise no handle is created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if user-space has preloaded it with -EFAULT, the
 * error will hopefully be detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle,
			    int32_t out_fence_fd,
			    struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to the commands, or NULL if they should
 * be copied in from @user_commands.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

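/**
 * vmw_execbuf_tie_context - Look up and reserve a DX context for submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 */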
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

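/**
 * vmw_execbuf_process - Verify, patch, fence and submit a command batch
 *
 * @file_priv: Pointer to the calling file, or NULL for in-kernel submission.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, lag in microseconds to throttle against
 * before submitting.
 * @dx_context_handle: Handle of the DX context to submit against, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: Optional user-space address to copy fence information to.
 * @out_fence: If non-NULL, assigned the created fence without dropping the
 * reference; the caller is then responsible for unreferencing it.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags controlling the submission.
 */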
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence,
			uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			DRM_ERROR("Failed to get a fence file descriptor.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);

	/*
	 * If anything fails here, give up trying to export the fence
	 * and do a sync since the user mode will not be able to sync
	 * the fence itself. This ensures we are still functionally
	 * correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle,
				    out_fence_fd, sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

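/**
 * vmw_execbuf_ioctl - ioctl entry point for command batch submission
 *
 * @dev: Pointer to the drm device.
 * @data: User-space address of the ioctl argument.
 * @file_priv: Pointer to the calling file.
 * @size: Size of the ioctl argument as passed in by the drm core.
 */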
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		break;
	}

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;	/* Drop the imported fence reference, if any. */

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL,
				  arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}