// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})
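
/*
 * Typical use in a command verifier (a minimal sketch; this exact pattern
 * appears in the DX verifiers further down in this file):
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */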

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var
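
/*
 * E.g. VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery) declares a pointer
 * "cmd" to an anonymous struct laid out exactly like the command in the
 * stream: an SVGA3dCmdHeader immediately followed by the typed body. The
 * verifiers below recover it from a header pointer with
 * container_of(header, typeof(*cmd), header).
 */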

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs fixup
 * is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
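
/*
 * Note: the bit-field widths above cap @offset at 2^29 bytes and @rel_type at
 * eight values; both limits are asserted against SVGA_CB_MAX_SIZE and
 * vmw_res_rel_max with BUILD_BUG_ON() in vmw_resource_relocations_apply().
 */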

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
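
/*
 * For illustration, a command-table entry built with this macro might look
 * like the following (a sketch; the real table lives further down in this
 * file):
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * i.e. SVGA_3D_CMD_SURFACE_COPY is allowed from user space and checked by
 * vmw_cmd_surface_copy_check() whether or not guest-backed objects are
 * available.
 */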

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private struct.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the binding manager's
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}
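
/*
 * The extra space reserved here becomes the struct vmw_ctx_validation_info
 * that vmw_cmd_ctx_first_setup() fills in on a context's first use in a
 * command batch (see vmw_execbuf_res_noref_val_add() below).
 */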

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context holding the relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
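
/*
 * Worked example: a relocation recorded with rel_type == vmw_res_rel_normal
 * and offset 16 overwrites the u32 at byte offset 16 of @cb with the device
 * id the resource was assigned during validation, while a
 * vmw_res_rel_cond_nop relocation instead turns the command into
 * SVGA_3D_CMD_NOP if the resource id is still -1 at this point.
 */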

/* Verifier stub for commands that are never valid from user space. */
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

/* Verifier stub for commands that need no additional checking. */
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed where the user-space resource id handle is located.
 * @p_res: Pointer to a pointer to the resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence must be the last resource of
 * that type to have been processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
1571
vmw_cmd_draw(struct vmw_private * dev_priv,struct vmw_sw_context * sw_context,SVGA3dCmdHeader * header)1572 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1573 struct vmw_sw_context *sw_context,
1574 SVGA3dCmdHeader *header)
1575 {
1576 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1577 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1578 (unsigned long)header + sizeof(*cmd));
1579 SVGA3dPrimitiveRange *range;
1580 uint32_t i;
1581 uint32_t maxnum;
1582 int ret;
1583
1584 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1585 if (unlikely(ret != 0))
1586 return ret;
1587
1588 cmd = container_of(header, typeof(*cmd), header);
1589 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1590
1591 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1592 VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1593 return -EINVAL;
1594 }
1595
1596 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1597 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1598 VMW_RES_DIRTY_NONE,
1599 user_surface_converter,
1600 &decl->array.surfaceId, NULL);
1601 if (unlikely(ret != 0))
1602 return ret;
1603 }
1604
1605 maxnum = (header->size - sizeof(cmd->body) -
1606 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1607 if (unlikely(cmd->body.numRanges > maxnum)) {
1608 VMW_DEBUG_USER("Illegal number of index ranges.\n");
1609 return -EINVAL;
1610 }
1611
1612 range = (SVGA3dPrimitiveRange *) decl;
1613 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1614 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1615 VMW_RES_DIRTY_NONE,
1616 user_surface_converter,
1617 &range->indexArray.surfaceId, NULL);
1618 if (unlikely(ret != 0))
1619 return ret;
1620 }
1621 return 0;
1622 }
1623
1624 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1625 struct vmw_sw_context *sw_context,
1626 SVGA3dCmdHeader *header)
1627 {
1628 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1629 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1630 ((unsigned long) header + header->size + sizeof(*header));
1631 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1632 ((unsigned long) header + sizeof(*cmd));
1633 struct vmw_resource *ctx;
1634 struct vmw_resource *res;
1635 int ret;
1636
1637 cmd = container_of(header, typeof(*cmd), header);
1638
1639 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1640 VMW_RES_DIRTY_SET, user_context_converter,
1641 &cmd->body.cid, &ctx);
1642 if (unlikely(ret != 0))
1643 return ret;
1644
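/*
* Walk all texture states in the command; only BIND_TEXTURE states
* carry a surface id that needs validation.
*/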
1645 for (; cur_state < last_state; ++cur_state) {
1646 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1647 continue;
1648
1649 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1650 VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1651 (unsigned int) cur_state->stage);
1652 return -EINVAL;
1653 }
1654
1655 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1656 VMW_RES_DIRTY_NONE,
1657 user_surface_converter,
1658 &cur_state->value, &res);
1659 if (unlikely(ret != 0))
1660 return ret;
1661
1662 if (dev_priv->has_mob) {
1663 struct vmw_ctx_bindinfo_tex binding;
1664 struct vmw_ctx_validation_info *node;
1665
1666 node = vmw_execbuf_info_from_res(sw_context, ctx);
1667 if (!node)
1668 return -EINVAL;
1669
1670 binding.bi.ctx = ctx;
1671 binding.bi.res = res;
1672 binding.bi.bt = vmw_ctx_binding_tex;
1673 binding.texture_stage = cur_state->stage;
1674 vmw_binding_add(node->staged, &binding.bi, 0,
1675 binding.texture_stage);
1676 }
1677 }
1678
1679 return 0;
1680 }
1681
1682 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1683 struct vmw_sw_context *sw_context,
1684 void *buf)
1685 {
1686 struct vmw_buffer_object *vmw_bo;
1687
1688 struct {
1689 uint32_t header;
1690 SVGAFifoCmdDefineGMRFB body;
1691 } *cmd = buf;
1692
1693 return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1694 &vmw_bo);
1695 }
1696
1697 /**
1698 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1699 * switching
1700 *
1701 * @dev_priv: Pointer to a device private struct.
1702 * @sw_context: The software context being used for this batch.
1703 * @res: Pointer to the resource whose backup buffer is switched.
1704 * @buf_id: Pointer to the user-space backup buffer handle in the command
1705 * stream.
1706 * @backup_offset: Offset of backup into MOB.
1707 *
1708 * This function prepares for registering a switch of backup buffers in the
1709 * resource metadata just prior to unreserving.
1711 */
1712 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1713 struct vmw_sw_context *sw_context,
1714 struct vmw_resource *res, uint32_t *buf_id,
1715 unsigned long backup_offset)
1716 {
1717 struct vmw_buffer_object *vbo;
1718 void *info;
1719 int ret;
1720
1721 info = vmw_execbuf_info_from_res(sw_context, res);
1722 if (!info)
1723 return -EINVAL;
1724
1725 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1726 if (ret)
1727 return ret;
1728
1729 vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1730 backup_offset);
1731 return 0;
1732 }
1733
1734 /**
1735 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1736 *
1737 * @dev_priv: Pointer to a device private struct.
1738 * @sw_context: The software context being used for this batch.
1739 * @res_type: The resource type.
1740 * @converter: Information about user-space binding for this resource type.
1741 * @res_id: Pointer to the user-space resource handle in the command stream.
1742 * @buf_id: Pointer to the user-space backup buffer handle in the command
1743 * stream.
1744 * @backup_offset: Offset of backup into MOB.
1745 *
1746 * This function prepares for registering a switch of backup buffers in the
1747 * resource metadata just prior to unreserving. It's basically a wrapper around
1748 * vmw_cmd_res_switch_backup with a different interface.
1749 */
1750 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1751 struct vmw_sw_context *sw_context,
1752 enum vmw_res_type res_type,
1753 const struct vmw_user_resource_conv
1754 *converter, uint32_t *res_id, uint32_t *buf_id,
1755 unsigned long backup_offset)
1756 {
1757 struct vmw_resource *res;
1758 int ret;
1759
1760 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1761 VMW_RES_DIRTY_NONE, converter, res_id, &res);
1762 if (ret)
1763 return ret;
1764
1765 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1766 backup_offset);
1767 }
1768
1769 /**
1770 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1771 *
1772 * @dev_priv: Pointer to a device private struct.
1773 * @sw_context: The software context being used for this batch.
1774 * @header: Pointer to the command header in the command stream.
1775 */
1776 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1777 struct vmw_sw_context *sw_context,
1778 SVGA3dCmdHeader *header)
1779 {
1780 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1781 container_of(header, typeof(*cmd), header);
1782
1783 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1784 user_surface_converter, &cmd->body.sid,
1785 &cmd->body.mobid, 0);
1786 }
1787
1788 /**
1789 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1790 *
1791 * @dev_priv: Pointer to a device private struct.
1792 * @sw_context: The software context being used for this batch.
1793 * @header: Pointer to the command header in the command stream.
1794 */
1795 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1796 struct vmw_sw_context *sw_context,
1797 SVGA3dCmdHeader *header)
1798 {
1799 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1800 container_of(header, typeof(*cmd), header);
1801
1802 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1803 VMW_RES_DIRTY_NONE, user_surface_converter,
1804 &cmd->body.image.sid, NULL);
1805 }
1806
1807 /**
1808 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1809 *
1810 * @dev_priv: Pointer to a device private struct.
1811 * @sw_context: The software context being used for this batch.
1812 * @header: Pointer to the command header in the command stream.
1813 */
1814 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1815 struct vmw_sw_context *sw_context,
1816 SVGA3dCmdHeader *header)
1817 {
1818 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1819 container_of(header, typeof(*cmd), header);
1820
1821 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1822 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1823 &cmd->body.sid, NULL);
1824 }
1825
1826 /**
1827 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1828 *
1829 * @dev_priv: Pointer to a device private struct.
1830 * @sw_context: The software context being used for this batch.
1831 * @header: Pointer to the command header in the command stream.
1832 */
1833 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1834 struct vmw_sw_context *sw_context,
1835 SVGA3dCmdHeader *header)
1836 {
1837 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1838 container_of(header, typeof(*cmd), header);
1839
1840 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1841 VMW_RES_DIRTY_NONE, user_surface_converter,
1842 &cmd->body.image.sid, NULL);
1843 }
1844
1845 /**
1846 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1847 * command
1848 *
1849 * @dev_priv: Pointer to a device private struct.
1850 * @sw_context: The software context being used for this batch.
1851 * @header: Pointer to the command header in the command stream.
1852 */
1853 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1854 struct vmw_sw_context *sw_context,
1855 SVGA3dCmdHeader *header)
1856 {
1857 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1858 container_of(header, typeof(*cmd), header);
1859
1860 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1861 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1862 &cmd->body.sid, NULL);
1863 }
1864
1865 /**
1866 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1867 * command
1868 *
1869 * @dev_priv: Pointer to a device private struct.
1870 * @sw_context: The software context being used for this batch.
1871 * @header: Pointer to the command header in the command stream.
1872 */
1873 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1874 struct vmw_sw_context *sw_context,
1875 SVGA3dCmdHeader *header)
1876 {
1877 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1878 container_of(header, typeof(*cmd), header);
1879
1880 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1881 VMW_RES_DIRTY_NONE, user_surface_converter,
1882 &cmd->body.image.sid, NULL);
1883 }
1884
1885 /**
1886 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1887 * command
1888 *
1889 * @dev_priv: Pointer to a device private struct.
1890 * @sw_context: The software context being used for this batch.
1891 * @header: Pointer to the command header in the command stream.
1892 */
1893 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1894 struct vmw_sw_context *sw_context,
1895 SVGA3dCmdHeader *header)
1896 {
1897 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1898 container_of(header, typeof(*cmd), header);
1899
1900 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1901 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1902 &cmd->body.sid, NULL);
1903 }
1904
1905 /**
1906 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1907 *
1908 * @dev_priv: Pointer to a device private struct.
1909 * @sw_context: The software context being used for this batch.
1910 * @header: Pointer to the command header in the command stream.
1911 */
1912 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1913 struct vmw_sw_context *sw_context,
1914 SVGA3dCmdHeader *header)
1915 {
1916 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1917 int ret;
1918 size_t size;
1919 struct vmw_resource *ctx;
1920
1921 cmd = container_of(header, typeof(*cmd), header);
1922
1923 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1924 VMW_RES_DIRTY_SET, user_context_converter,
1925 &cmd->body.cid, &ctx);
1926 if (unlikely(ret != 0))
1927 return ret;
1928
1929 if (unlikely(!dev_priv->has_mob))
1930 return 0;
1931
1932 size = cmd->header.size - sizeof(cmd->body);
1933 ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1934 cmd->body.shid, cmd + 1, cmd->body.type,
1935 size, &sw_context->staged_cmd_res);
1936 if (unlikely(ret != 0))
1937 return ret;
1938
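/*
* The compat shader is managed entirely by the kernel, so replace the
* device command with a NOP in the command stream.
*/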
1939 return vmw_resource_relocation_add(sw_context, NULL,
1940 vmw_ptr_diff(sw_context->buf_start,
1941 &cmd->header.id),
1942 vmw_res_rel_nop);
1943 }
1944
1945 /**
1946 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1947 *
1948 * @dev_priv: Pointer to a device private struct.
1949 * @sw_context: The software context being used for this batch.
1950 * @header: Pointer to the command header in the command stream.
1951 */
1952 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1953 struct vmw_sw_context *sw_context,
1954 SVGA3dCmdHeader *header)
1955 {
1956 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1957 int ret;
1958 struct vmw_resource *ctx;
1959
1960 cmd = container_of(header, typeof(*cmd), header);
1961
1962 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1963 VMW_RES_DIRTY_SET, user_context_converter,
1964 &cmd->body.cid, &ctx);
1965 if (unlikely(ret != 0))
1966 return ret;
1967
1968 if (unlikely(!dev_priv->has_mob))
1969 return 0;
1970
1971 ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1972 cmd->body.type, &sw_context->staged_cmd_res);
1973 if (unlikely(ret != 0))
1974 return ret;
1975
1976 return vmw_resource_relocation_add(sw_context, NULL,
1977 vmw_ptr_diff(sw_context->buf_start,
1978 &cmd->header.id),
1979 vmw_res_rel_nop);
1980 }
1981
1982 /**
1983 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1984 *
1985 * @dev_priv: Pointer to a device private struct.
1986 * @sw_context: The software context being used for this batch.
1987 * @header: Pointer to the command header in the command stream.
1988 */
1989 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1990 struct vmw_sw_context *sw_context,
1991 SVGA3dCmdHeader *header)
1992 {
1993 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1994 struct vmw_ctx_bindinfo_shader binding;
1995 struct vmw_resource *ctx, *res = NULL;
1996 struct vmw_ctx_validation_info *ctx_info;
1997 int ret;
1998
1999 cmd = container_of(header, typeof(*cmd), header);
2000
2001 if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
2002 VMW_DEBUG_USER("Illegal shader type %u.\n",
2003 (unsigned int) cmd->body.type);
2004 return -EINVAL;
2005 }
2006
2007 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2008 VMW_RES_DIRTY_SET, user_context_converter,
2009 &cmd->body.cid, &ctx);
2010 if (unlikely(ret != 0))
2011 return ret;
2012
2013 if (!dev_priv->has_mob)
2014 return 0;
2015
2016 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2017 /*
2018 * This is the compat shader path - the shaders are per-device
2019 * guest-backed, while user-space thinks they are per-context
2020 * host-backed shaders.
2021 */
2022 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2023 cmd->body.shid, cmd->body.type);
2024 if (!IS_ERR(res)) {
2025 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2026 VMW_RES_DIRTY_NONE);
2027 if (unlikely(ret != 0))
2028 return ret;
2029
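/*
* Once the compat shader has been validated, patch its device id into
* the command stream in place of the user-space handle.
*/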
2030 ret = vmw_resource_relocation_add
2031 (sw_context, res,
2032 vmw_ptr_diff(sw_context->buf_start,
2033 &cmd->body.shid),
2034 vmw_res_rel_normal);
2035 if (unlikely(ret != 0))
2036 return ret;
2037 }
2038 }
2039
2040 if (IS_ERR_OR_NULL(res)) {
2041 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2042 VMW_RES_DIRTY_NONE,
2043 user_shader_converter, &cmd->body.shid,
2044 &res);
2045 if (unlikely(ret != 0))
2046 return ret;
2047 }
2048
2049 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2050 if (!ctx_info)
2051 return -EINVAL;
2052
2053 binding.bi.ctx = ctx;
2054 binding.bi.res = res;
2055 binding.bi.bt = vmw_ctx_binding_shader;
2056 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2057 vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2058
2059 return 0;
2060 }
2061
2062 /**
2063 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2064 *
2065 * @dev_priv: Pointer to a device private struct.
2066 * @sw_context: The software context being used for this batch.
2067 * @header: Pointer to the command header in the command stream.
2068 */
2069 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2070 struct vmw_sw_context *sw_context,
2071 SVGA3dCmdHeader *header)
2072 {
2073 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2074 int ret;
2075
2076 cmd = container_of(header, typeof(*cmd), header);
2077
2078 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2079 VMW_RES_DIRTY_SET, user_context_converter,
2080 &cmd->body.cid, NULL);
2081 if (unlikely(ret != 0))
2082 return ret;
2083
2084 if (dev_priv->has_mob)
2085 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2086
2087 return 0;
2088 }
2089
2090 /**
2091 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2092 *
2093 * @dev_priv: Pointer to a device private struct.
2094 * @sw_context: The software context being used for this batch.
2095 * @header: Pointer to the command header in the command stream.
2096 */
2097 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2098 struct vmw_sw_context *sw_context,
2099 SVGA3dCmdHeader *header)
2100 {
2101 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2102 container_of(header, typeof(*cmd), header);
2103
2104 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2105 user_shader_converter, &cmd->body.shid,
2106 &cmd->body.mobid, cmd->body.offsetInBytes);
2107 }
2108
2109 /**
2110 * vmw_cmd_dx_set_single_constant_buffer - Validate
2111 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2112 *
2113 * @dev_priv: Pointer to a device private struct.
2114 * @sw_context: The software context being used for this batch.
2115 * @header: Pointer to the command header in the command stream.
2116 */
2117 static int
2118 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2119 struct vmw_sw_context *sw_context,
2120 SVGA3dCmdHeader *header)
2121 {
2122 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2123
2124 struct vmw_resource *res = NULL;
2125 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2126 struct vmw_ctx_bindinfo_cb binding;
2127 int ret;
2128
2129 if (!ctx_node)
2130 return -EINVAL;
2131
2132 cmd = container_of(header, typeof(*cmd), header);
2133 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2134 VMW_RES_DIRTY_NONE, user_surface_converter,
2135 &cmd->body.sid, &res);
2136 if (unlikely(ret != 0))
2137 return ret;
2138
2139 if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2140 cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2141 VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2142 (unsigned int) cmd->body.type,
2143 (unsigned int) cmd->body.slot);
2144 return -EINVAL;
2145 }
2146
2147 binding.bi.ctx = ctx_node->ctx;
2148 binding.bi.res = res;
2149 binding.bi.bt = vmw_ctx_binding_cb;
2150 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2151 binding.offset = cmd->body.offsetInBytes;
2152 binding.size = cmd->body.sizeInBytes;
2153 binding.slot = cmd->body.slot;
2154
2155 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2156 binding.slot);
2157
2158 return 0;
2159 }
2160
2161 /**
2162 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2163 * command
2164 *
2165 * @dev_priv: Pointer to a device private struct.
2166 * @sw_context: The software context being used for this batch.
2167 * @header: Pointer to the command header in the command stream.
2168 */
2169 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2170 struct vmw_sw_context *sw_context,
2171 SVGA3dCmdHeader *header)
2172 {
2173 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2174 container_of(header, typeof(*cmd), header);
2175
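/* The view ids follow the fixed body; their count is implied by the command size. */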
2176 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2177 sizeof(SVGA3dShaderResourceViewId);
2178
2179 if ((u64) cmd->body.startView + (u64) num_sr_view >
2180 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2181 !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2182 VMW_DEBUG_USER("Invalid shader binding.\n");
2183 return -EINVAL;
2184 }
2185
2186 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2187 vmw_ctx_binding_sr,
2188 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2189 (void *) &cmd[1], num_sr_view,
2190 cmd->body.startView);
2191 }
2192
2193 /**
2194 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2195 *
2196 * @dev_priv: Pointer to a device private struct.
2197 * @sw_context: The software context being used for this batch.
2198 * @header: Pointer to the command header in the command stream.
2199 */
2200 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2201 struct vmw_sw_context *sw_context,
2202 SVGA3dCmdHeader *header)
2203 {
2204 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2205 struct vmw_resource *res = NULL;
2206 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2207 struct vmw_ctx_bindinfo_shader binding;
2208 int ret = 0;
2209
2210 if (!ctx_node)
2211 return -EINVAL;
2212
2213 cmd = container_of(header, typeof(*cmd), header);
2214
2215 if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2216 VMW_DEBUG_USER("Illegal shader type %u.\n",
2217 (unsigned int) cmd->body.type);
2218 return -EINVAL;
2219 }
2220
2221 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2222 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2223 if (IS_ERR(res)) {
2224 VMW_DEBUG_USER("Could not find shader for binding.\n");
2225 return PTR_ERR(res);
2226 }
2227
2228 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2229 VMW_RES_DIRTY_NONE);
2230 if (ret)
2231 return ret;
2232 }
2233
2234 binding.bi.ctx = ctx_node->ctx;
2235 binding.bi.res = res;
2236 binding.bi.bt = vmw_ctx_binding_dx_shader;
2237 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2238
2239 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2240
2241 return 0;
2242 }
2243
2244 /**
2245 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2246 * command
2247 *
2248 * @dev_priv: Pointer to a device private struct.
2249 * @sw_context: The software context being used for this batch.
2250 * @header: Pointer to the command header in the command stream.
2251 */
2252 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2253 struct vmw_sw_context *sw_context,
2254 SVGA3dCmdHeader *header)
2255 {
2256 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2257 struct vmw_ctx_bindinfo_vb binding;
2258 struct vmw_resource *res;
2259 struct {
2260 SVGA3dCmdHeader header;
2261 SVGA3dCmdDXSetVertexBuffers body;
2262 SVGA3dVertexBuffer buf[];
2263 } *cmd;
2264 int i, ret, num;
2265
2266 if (!ctx_node)
2267 return -EINVAL;
2268
2269 cmd = container_of(header, typeof(*cmd), header);
2270 num = (cmd->header.size - sizeof(cmd->body)) /
2271 sizeof(SVGA3dVertexBuffer);
2272 if ((u64)num + (u64)cmd->body.startBuffer >
2273 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2274 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2275 return -EINVAL;
2276 }
2277
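/* Validate each vertex buffer's surface and stage a binding for its slot. */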
2278 for (i = 0; i < num; i++) {
2279 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2280 VMW_RES_DIRTY_NONE,
2281 user_surface_converter,
2282 &cmd->buf[i].sid, &res);
2283 if (unlikely(ret != 0))
2284 return ret;
2285
2286 binding.bi.ctx = ctx_node->ctx;
2287 binding.bi.bt = vmw_ctx_binding_vb;
2288 binding.bi.res = res;
2289 binding.offset = cmd->buf[i].offset;
2290 binding.stride = cmd->buf[i].stride;
2291 binding.slot = i + cmd->body.startBuffer;
2292
2293 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2294 }
2295
2296 return 0;
2297 }
2298
2299 /**
2300 * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
2301 * command.
2302 *
2303 * @dev_priv: Pointer to a device private struct.
2304 * @sw_context: The software context being used for this batch.
2305 * @header: Pointer to the command header in the command stream.
2306 */
2307 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2308 struct vmw_sw_context *sw_context,
2309 SVGA3dCmdHeader *header)
2310 {
2311 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2312 struct vmw_ctx_bindinfo_ib binding;
2313 struct vmw_resource *res;
2314 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2315 int ret;
2316
2317 if (!ctx_node)
2318 return -EINVAL;
2319
2320 cmd = container_of(header, typeof(*cmd), header);
2321 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2322 VMW_RES_DIRTY_NONE, user_surface_converter,
2323 &cmd->body.sid, &res);
2324 if (unlikely(ret != 0))
2325 return ret;
2326
2327 binding.bi.ctx = ctx_node->ctx;
2328 binding.bi.res = res;
2329 binding.bi.bt = vmw_ctx_binding_ib;
2330 binding.offset = cmd->body.offset;
2331 binding.format = cmd->body.format;
2332
2333 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2334
2335 return 0;
2336 }
2337
2338 /**
2339 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2340 * command
2341 *
2342 * @dev_priv: Pointer to a device private struct.
2343 * @sw_context: The software context being used for this batch.
2344 * @header: Pointer to the command header in the command stream.
2345 */
2346 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2347 struct vmw_sw_context *sw_context,
2348 SVGA3dCmdHeader *header)
2349 {
2350 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2351 container_of(header, typeof(*cmd), header);
2352 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2353 sizeof(SVGA3dRenderTargetViewId);
2354 int ret;
2355
2356 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2357 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2358 return -EINVAL;
2359 }
2360
2361 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2362 0, &cmd->body.depthStencilViewId, 1, 0);
2363 if (ret)
2364 return ret;
2365
2366 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2367 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2368 num_rt_view, 0);
2369 }
2370
2371 /**
2372 * vmw_cmd_dx_clear_rendertarget_view - Validate
2373 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2374 *
2375 * @dev_priv: Pointer to a device private struct.
2376 * @sw_context: The software context being used for this batch.
2377 * @header: Pointer to the command header in the command stream.
2378 */
2379 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2380 struct vmw_sw_context *sw_context,
2381 SVGA3dCmdHeader *header)
2382 {
2383 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2384 container_of(header, typeof(*cmd), header);
2385 struct vmw_resource *ret;
2386
2387 ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2388 cmd->body.renderTargetViewId);
2389
2390 return PTR_ERR_OR_ZERO(ret);
2391 }
2392
2393 /**
2394 * vmw_cmd_dx_clear_depthstencil_view - Validate
2395 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2396 *
2397 * @dev_priv: Pointer to a device private struct.
2398 * @sw_context: The software context being used for this batch.
2399 * @header: Pointer to the command header in the command stream.
2400 */
2401 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2402 struct vmw_sw_context *sw_context,
2403 SVGA3dCmdHeader *header)
2404 {
2405 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2406 container_of(header, typeof(*cmd), header);
2407 struct vmw_resource *ret;
2408
2409 ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2410 cmd->body.depthStencilViewId);
2411
2412 return PTR_ERR_OR_ZERO(ret);
2413 }
2414
2415 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2416 struct vmw_sw_context *sw_context,
2417 SVGA3dCmdHeader *header)
2418 {
2419 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2420 struct vmw_resource *srf;
2421 struct vmw_resource *res;
2422 enum vmw_view_type view_type;
2423 int ret;
2424 /*
2425 * This is based on the fact that all affected define commands have the
2426 * same initial command body layout.
2427 */
2428 struct {
2429 SVGA3dCmdHeader header;
2430 uint32 defined_id;
2431 uint32 sid;
2432 } *cmd;
2433
2434 if (!ctx_node)
2435 return -EINVAL;
2436
2437 view_type = vmw_view_cmd_to_type(header->id);
2438 if (view_type == vmw_view_max)
2439 return -EINVAL;
2440
2441 cmd = container_of(header, typeof(*cmd), header);
2442 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2443 VMW_DEBUG_USER("Invalid surface id.\n");
2444 return -EINVAL;
2445 }
2446 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2447 VMW_RES_DIRTY_NONE, user_surface_converter,
2448 &cmd->sid, &srf);
2449 if (unlikely(ret != 0))
2450 return ret;
2451
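/*
* Notify the cotable backing this view type so it can make room for
* the new id before the view is created.
*/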
2452 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2453 ret = vmw_cotable_notify(res, cmd->defined_id);
2454 if (unlikely(ret != 0))
2455 return ret;
2456
2457 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2458 cmd->defined_id, header,
2459 header->size + sizeof(*header),
2460 &sw_context->staged_cmd_res);
2461 }
2462
2463 /**
2464 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2465 *
2466 * @dev_priv: Pointer to a device private struct.
2467 * @sw_context: The software context being used for this batch.
2468 * @header: Pointer to the command header in the command stream.
2469 */
2470 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2471 struct vmw_sw_context *sw_context,
2472 SVGA3dCmdHeader *header)
2473 {
2474 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2475 struct vmw_ctx_bindinfo_so_target binding;
2476 struct vmw_resource *res;
2477 struct {
2478 SVGA3dCmdHeader header;
2479 SVGA3dCmdDXSetSOTargets body;
2480 SVGA3dSoTarget targets[];
2481 } *cmd;
2482 int i, ret, num;
2483
2484 if (!ctx_node)
2485 return -EINVAL;
2486
2487 cmd = container_of(header, typeof(*cmd), header);
2488 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2489
2490 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2491 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2492 return -EINVAL;
2493 }
2494
2495 for (i = 0; i < num; i++) {
2496 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2497 VMW_RES_DIRTY_SET,
2498 user_surface_converter,
2499 &cmd->targets[i].sid, &res);
2500 if (unlikely(ret != 0))
2501 return ret;
2502
2503 binding.bi.ctx = ctx_node->ctx;
2504 binding.bi.res = res;
2505 binding.bi.bt = vmw_ctx_binding_so_target;
2506 binding.offset = cmd->targets[i].offset;
2507 binding.size = cmd->targets[i].sizeInBytes;
2508 binding.slot = i;
2509
2510 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2511 }
2512
2513 return 0;
2514 }
2515
2516 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2517 struct vmw_sw_context *sw_context,
2518 SVGA3dCmdHeader *header)
2519 {
2520 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2521 struct vmw_resource *res;
2522 /*
2523 * This is based on the fact that all affected define commands have
2524 * the same initial command body layout.
2525 */
2526 struct {
2527 SVGA3dCmdHeader header;
2528 uint32 defined_id;
2529 } *cmd;
2530 enum vmw_so_type so_type;
2531 int ret;
2532
2533 if (!ctx_node)
2534 return -EINVAL;
2535
2536 so_type = vmw_so_cmd_to_type(header->id);
2537 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2538 cmd = container_of(header, typeof(*cmd), header);
2539 ret = vmw_cotable_notify(res, cmd->defined_id);
2540
2541 return ret;
2542 }
2543
2544 /**
2545 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2546 * command
2547 *
2548 * @dev_priv: Pointer to a device private struct.
2549 * @sw_context: The software context being used for this batch.
2550 * @header: Pointer to the command header in the command stream.
2551 */
2552 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2553 struct vmw_sw_context *sw_context,
2554 SVGA3dCmdHeader *header)
2555 {
2556 struct {
2557 SVGA3dCmdHeader header;
2558 union {
2559 SVGA3dCmdDXReadbackSubResource r_body;
2560 SVGA3dCmdDXInvalidateSubResource i_body;
2561 SVGA3dCmdDXUpdateSubResource u_body;
2562 SVGA3dSurfaceId sid;
2563 };
2564 } *cmd;
2565
2566 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2567 offsetof(typeof(*cmd), sid));
2568 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2569 offsetof(typeof(*cmd), sid));
2570 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2571 offsetof(typeof(*cmd), sid));
2572
2573 cmd = container_of(header, typeof(*cmd), header);
2574 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2575 VMW_RES_DIRTY_NONE, user_surface_converter,
2576 &cmd->sid, NULL);
2577 }
2578
2579 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2580 struct vmw_sw_context *sw_context,
2581 SVGA3dCmdHeader *header)
2582 {
2583 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2584
2585 if (!ctx_node)
2586 return -EINVAL;
2587
2588 return 0;
2589 }
2590
2591 /**
2592 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2593 * resource for removal.
2594 *
2595 * @dev_priv: Pointer to a device private struct.
2596 * @sw_context: The software context being used for this batch.
2597 * @header: Pointer to the command header in the command stream.
2598 *
2599 * Check that the view exists, and if it was not created using this command
2600 * batch, conditionally make this command a NOP.
2601 */
2602 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2603 struct vmw_sw_context *sw_context,
2604 SVGA3dCmdHeader *header)
2605 {
2606 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2607 struct {
2608 SVGA3dCmdHeader header;
2609 union vmw_view_destroy body;
2610 } *cmd = container_of(header, typeof(*cmd), header);
2611 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2612 struct vmw_resource *view;
2613 int ret;
2614
2615 if (!ctx_node)
2616 return -EINVAL;
2617
2618 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2619 &sw_context->staged_cmd_res, &view);
2620 if (ret || !view)
2621 return ret;
2622
2623 /*
2624 * If the view wasn't created during this command batch, it might
2625 * have been removed due to a context swapout, so add a
2626 * relocation to conditionally make this command a NOP to avoid
2627 * device errors.
2628 */
2629 return vmw_resource_relocation_add(sw_context, view,
2630 vmw_ptr_diff(sw_context->buf_start,
2631 &cmd->header.id),
2632 vmw_res_rel_cond_nop);
2633 }
2634
2635 /**
2636 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2637 *
2638 * @dev_priv: Pointer to a device private struct.
2639 * @sw_context: The software context being used for this batch.
2640 * @header: Pointer to the command header in the command stream.
2641 */
2642 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2643 struct vmw_sw_context *sw_context,
2644 SVGA3dCmdHeader *header)
2645 {
2646 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2647 struct vmw_resource *res;
2648 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2649 container_of(header, typeof(*cmd), header);
2650 int ret;
2651
2652 if (!ctx_node)
2653 return -EINVAL;
2654
2655 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2656 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2657 if (ret)
2658 return ret;
2659
2660 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2661 cmd->body.shaderId, cmd->body.type,
2662 &sw_context->staged_cmd_res);
2663 }
2664
2665 /**
2666 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2667 *
2668 * @dev_priv: Pointer to a device private struct.
2669 * @sw_context: The software context being used for this batch.
2670 * @header: Pointer to the command header in the command stream.
2671 */
2672 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2673 struct vmw_sw_context *sw_context,
2674 SVGA3dCmdHeader *header)
2675 {
2676 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2677 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2678 container_of(header, typeof(*cmd), header);
2679 int ret;
2680
2681 if (!ctx_node)
2682 return -EINVAL;
2683
2684 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2685 &sw_context->staged_cmd_res);
2686
2687 return ret;
2688 }
2689
2690 /**
2691 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2692 *
2693 * @dev_priv: Pointer to a device private struct.
2694 * @sw_context: The software context being used for this batch.
2695 * @header: Pointer to the command header in the command stream.
2696 */
2697 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2698 struct vmw_sw_context *sw_context,
2699 SVGA3dCmdHeader *header)
2700 {
2701 struct vmw_resource *ctx;
2702 struct vmw_resource *res;
2703 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2704 container_of(header, typeof(*cmd), header);
2705 int ret;
2706
2707 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2708 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2709 VMW_RES_DIRTY_SET,
2710 user_context_converter, &cmd->body.cid,
2711 &ctx);
2712 if (ret)
2713 return ret;
2714 } else {
2715 struct vmw_ctx_validation_info *ctx_node =
2716 VMW_GET_CTX_NODE(sw_context);
2717
2718 if (!ctx_node)
2719 return -EINVAL;
2720
2721 ctx = ctx_node->ctx;
2722 }
2723
2724 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2725 if (IS_ERR(res)) {
2726 VMW_DEBUG_USER("Could not find shader to bind.\n");
2727 return PTR_ERR(res);
2728 }
2729
2730 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2731 VMW_RES_DIRTY_NONE);
2732 if (ret) {
2733 VMW_DEBUG_USER("Error creating resource validation node.\n");
2734 return ret;
2735 }
2736
2737 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2738 &cmd->body.mobid,
2739 cmd->body.offsetInBytes);
2740 }
2741
2742 /**
2743 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2744 *
2745 * @dev_priv: Pointer to a device private struct.
2746 * @sw_context: The software context being used for this batch.
2747 * @header: Pointer to the command header in the command stream.
2748 */
2749 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2750 struct vmw_sw_context *sw_context,
2751 SVGA3dCmdHeader *header)
2752 {
2753 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2754 container_of(header, typeof(*cmd), header);
2755 struct vmw_resource *view;
2756 struct vmw_res_cache_entry *rcache;
2757
2758 view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2759 cmd->body.shaderResourceViewId);
2760 if (IS_ERR(view))
2761 return PTR_ERR(view);
2762
2763 /*
2764 * Normally the shader-resource view is not gpu-dirtying, but for
2765 * this particular command it is...
2766 * So mark the last looked-up surface, which is the surface
2767 * the view points to, gpu-dirty.
2768 */
2769 rcache = &sw_context->res_cache[vmw_res_surface];
2770 vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2771 VMW_RES_DIRTY_SET);
2772 return 0;
2773 }
2774
2775 /**
2776 * vmw_cmd_dx_transfer_from_buffer - Validate
2777 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2778 *
2779 * @dev_priv: Pointer to a device private struct.
2780 * @sw_context: The software context being used for this batch.
2781 * @header: Pointer to the command header in the command stream.
2782 */
2783 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2784 struct vmw_sw_context *sw_context,
2785 SVGA3dCmdHeader *header)
2786 {
2787 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2788 container_of(header, typeof(*cmd), header);
2789 int ret;
2790
2791 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2792 VMW_RES_DIRTY_NONE, user_surface_converter,
2793 &cmd->body.srcSid, NULL);
2794 if (ret != 0)
2795 return ret;
2796
2797 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2798 VMW_RES_DIRTY_SET, user_surface_converter,
2799 &cmd->body.destSid, NULL);
2800 }
2801
2802 /**
2803 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2804 *
2805 * @dev_priv: Pointer to a device private struct.
2806 * @sw_context: The software context being used for this batch.
2807 * @header: Pointer to the command header in the command stream.
2808 */
2809 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2810 struct vmw_sw_context *sw_context,
2811 SVGA3dCmdHeader *header)
2812 {
2813 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2814 container_of(header, typeof(*cmd), header);
2815
2816 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2817 return -EINVAL;
2818
2819 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2820 VMW_RES_DIRTY_SET, user_surface_converter,
2821 &cmd->body.surface.sid, NULL);
2822 }
2823
2824 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2825 struct vmw_sw_context *sw_context,
2826 SVGA3dCmdHeader *header)
2827 {
2828 if (!has_sm5_context(dev_priv))
2829 return -EINVAL;
2830
2831 return 0;
2832 }
2833
2834 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2835 struct vmw_sw_context *sw_context,
2836 SVGA3dCmdHeader *header)
2837 {
2838 if (!has_sm5_context(dev_priv))
2839 return -EINVAL;
2840
2841 return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2842 }
2843
2844 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2845 struct vmw_sw_context *sw_context,
2846 SVGA3dCmdHeader *header)
2847 {
2848 if (!has_sm5_context(dev_priv))
2849 return -EINVAL;
2850
2851 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2852 }
2853
2854 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2855 struct vmw_sw_context *sw_context,
2856 SVGA3dCmdHeader *header)
2857 {
2858 struct {
2859 SVGA3dCmdHeader header;
2860 SVGA3dCmdDXClearUAViewUint body;
2861 } *cmd = container_of(header, typeof(*cmd), header);
2862 struct vmw_resource *ret;
2863
2864 if (!has_sm5_context(dev_priv))
2865 return -EINVAL;
2866
2867 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2868 cmd->body.uaViewId);
2869
2870 return PTR_ERR_OR_ZERO(ret);
2871 }
2872
2873 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2874 struct vmw_sw_context *sw_context,
2875 SVGA3dCmdHeader *header)
2876 {
2877 struct {
2878 SVGA3dCmdHeader header;
2879 SVGA3dCmdDXClearUAViewFloat body;
2880 } *cmd = container_of(header, typeof(*cmd), header);
2881 struct vmw_resource *ret;
2882
2883 if (!has_sm5_context(dev_priv))
2884 return -EINVAL;
2885
2886 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2887 cmd->body.uaViewId);
2888
2889 return PTR_ERR_OR_ZERO(ret);
2890 }
2891
2892 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2893 struct vmw_sw_context *sw_context,
2894 SVGA3dCmdHeader *header)
2895 {
2896 struct {
2897 SVGA3dCmdHeader header;
2898 SVGA3dCmdDXSetUAViews body;
2899 } *cmd = container_of(header, typeof(*cmd), header);
2900 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2901 sizeof(SVGA3dUAViewId);
2902 int ret;
2903
2904 if (!has_sm5_context(dev_priv))
2905 return -EINVAL;
2906
2907 if (num_uav > SVGA3D_MAX_UAVIEWS) {
2908 VMW_DEBUG_USER("Invalid UAV binding.\n");
2909 return -EINVAL;
2910 }
2911
2912 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2913 vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2914 num_uav, 0);
2915 if (ret)
2916 return ret;
2917
2918 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2919 cmd->body.uavSpliceIndex);
2920
2921 return ret;
2922 }
2923
2924 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2925 struct vmw_sw_context *sw_context,
2926 SVGA3dCmdHeader *header)
2927 {
2928 struct {
2929 SVGA3dCmdHeader header;
2930 SVGA3dCmdDXSetCSUAViews body;
2931 } *cmd = container_of(header, typeof(*cmd), header);
2932 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2933 sizeof(SVGA3dUAViewId);
2934 int ret;
2935
2936 if (!has_sm5_context(dev_priv))
2937 return -EINVAL;
2938
2939 if (num_uav > SVGA3D_MAX_UAVIEWS) {
2940 VMW_DEBUG_USER("Invalid UAV binding.\n");
2941 return -EINVAL;
2942 }
2943
2944 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2945 vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2946 num_uav, 0);
2947 if (ret)
2948 return ret;
2949
2950 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2951 cmd->body.startIndex);
2952
2953 return ret;
2954 }
2955
2956 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2957 struct vmw_sw_context *sw_context,
2958 SVGA3dCmdHeader *header)
2959 {
2960 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2961 struct vmw_resource *res;
2962 struct {
2963 SVGA3dCmdHeader header;
2964 SVGA3dCmdDXDefineStreamOutputWithMob body;
2965 } *cmd = container_of(header, typeof(*cmd), header);
2966 int ret;
2967
2968 if (!has_sm5_context(dev_priv))
2969 return -EINVAL;
2970
2971 if (!ctx_node) {
2972 DRM_ERROR("DX Context not set.\n");
2973 return -EINVAL;
2974 }
2975
2976 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2977 ret = vmw_cotable_notify(res, cmd->body.soid);
2978 if (ret)
2979 return ret;
2980
2981 return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2982 cmd->body.soid,
2983 &sw_context->staged_cmd_res);
2984 }
2985
2986 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
2987 struct vmw_sw_context *sw_context,
2988 SVGA3dCmdHeader *header)
2989 {
2990 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2991 struct vmw_resource *res;
2992 struct {
2993 SVGA3dCmdHeader header;
2994 SVGA3dCmdDXDestroyStreamOutput body;
2995 } *cmd = container_of(header, typeof(*cmd), header);
2996
2997 if (!ctx_node) {
2998 DRM_ERROR("DX Context not set.\n");
2999 return -EINVAL;
3000 }
3001
3002 /*
3003 * When the device does not support SM5, the streamoutput-with-mob commands
3004 * are not available to user-space. Simply return in this case.
3005 */
3006 if (!has_sm5_context(dev_priv))
3007 return 0;
3008
3009 /*
3010 * On an SM5-capable device, a failed lookup means user-space probably used
3011 * the old streamoutput define command. Return without an error.
3012 */
3013 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3014 cmd->body.soid);
3015 if (IS_ERR(res))
3016 return 0;
3017
3018 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3019 &sw_context->staged_cmd_res);
3020 }
3021
3022 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3023 struct vmw_sw_context *sw_context,
3024 SVGA3dCmdHeader *header)
3025 {
3026 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3027 struct vmw_resource *res;
3028 struct {
3029 SVGA3dCmdHeader header;
3030 SVGA3dCmdDXBindStreamOutput body;
3031 } *cmd = container_of(header, typeof(*cmd), header);
3032 int ret;
3033
3034 if (!has_sm5_context(dev_priv))
3035 return -EINVAL;
3036
3037 if (!ctx_node) {
3038 DRM_ERROR("DX Context not set.\n");
3039 return -EINVAL;
3040 }
3041
3042 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3043 cmd->body.soid);
3044 if (IS_ERR(res)) {
3045 DRM_ERROR("Could not find streamoutput to bind.\n");
3046 return PTR_ERR(res);
3047 }
3048
3049 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3050
3051 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3052 VMW_RES_DIRTY_NONE);
3053 if (ret) {
3054 DRM_ERROR("Error creating resource validation node.\n");
3055 return ret;
3056 }
3057
3058 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3059 &cmd->body.mobid,
3060 cmd->body.offsetInBytes);
3061 }
3062
3063 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3064 struct vmw_sw_context *sw_context,
3065 SVGA3dCmdHeader *header)
3066 {
3067 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3068 struct vmw_resource *res;
3069 struct vmw_ctx_bindinfo_so binding;
3070 struct {
3071 SVGA3dCmdHeader header;
3072 SVGA3dCmdDXSetStreamOutput body;
3073 } *cmd = container_of(header, typeof(*cmd), header);
3074 int ret;
3075
3076 if (!ctx_node) {
3077 DRM_ERROR("DX Context not set.\n");
3078 return -EINVAL;
3079 }
3080
3081 if (cmd->body.soid == SVGA3D_INVALID_ID)
3082 return 0;
3083
3084 /*
3085 * When the device does not support SM5, the streamoutput-with-mob commands
3086 * are not available to user-space. Simply return in this case.
3087 */
3088 if (!has_sm5_context(dev_priv))
3089 return 0;
3090
3091 /*
3092 * On an SM5-capable device, a failed lookup means user-space probably used
3093 * the old streamoutput define command. Return without an error.
3094 */
3095 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3096 cmd->body.soid);
3097 if (IS_ERR(res))
3098 return 0;
3100
3101 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3102 VMW_RES_DIRTY_NONE);
3103 if (ret) {
3104 DRM_ERROR("Error creating resource validation node.\n");
3105 return ret;
3106 }
3107
3108 binding.bi.ctx = ctx_node->ctx;
3109 binding.bi.res = res;
3110 binding.bi.bt = vmw_ctx_binding_so;
3111 binding.slot = 0; /* Only one stream output can be set on a context at a time. */
3112
3113 vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3114 binding.slot);
3115
3116 return ret;
3117 }
3118
3119 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3120 struct vmw_sw_context *sw_context,
3121 SVGA3dCmdHeader *header)
3122 {
3123 struct vmw_draw_indexed_instanced_indirect_cmd {
3124 SVGA3dCmdHeader header;
3125 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3126 } *cmd = container_of(header, typeof(*cmd), header);
3127
3128 if (!has_sm5_context(dev_priv))
3129 return -EINVAL;
3130
3131 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3132 VMW_RES_DIRTY_NONE, user_surface_converter,
3133 &cmd->body.argsBufferSid, NULL);
3134 }
3135
3136 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3137 struct vmw_sw_context *sw_context,
3138 SVGA3dCmdHeader *header)
3139 {
3140 struct vmw_draw_instanced_indirect_cmd {
3141 SVGA3dCmdHeader header;
3142 SVGA3dCmdDXDrawInstancedIndirect body;
3143 } *cmd = container_of(header, typeof(*cmd), header);
3144
3145 if (!has_sm5_context(dev_priv))
3146 return -EINVAL;
3147
3148 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3149 VMW_RES_DIRTY_NONE, user_surface_converter,
3150 &cmd->body.argsBufferSid, NULL);
3151 }
3152
3153 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3154 struct vmw_sw_context *sw_context,
3155 SVGA3dCmdHeader *header)
3156 {
3157 struct vmw_dispatch_indirect_cmd {
3158 SVGA3dCmdHeader header;
3159 SVGA3dCmdDXDispatchIndirect body;
3160 } *cmd = container_of(header, typeof(*cmd), header);
3161
3162 if (!has_sm5_context(dev_priv))
3163 return -EINVAL;
3164
3165 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3166 VMW_RES_DIRTY_NONE, user_surface_converter,
3167 &cmd->body.argsBufferSid, NULL);
3168 }
3169
3170 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3171 struct vmw_sw_context *sw_context,
3172 void *buf, uint32_t *size)
3173 {
3174 uint32_t size_remaining = *size;
3175 uint32_t cmd_id;
3176
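/*
* Non-3D SVGA commands consist of a 32-bit command id followed by a
* fixed-size payload; derive the total size from the id.
*/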
3177 cmd_id = ((uint32_t *)buf)[0];
3178 switch (cmd_id) {
3179 case SVGA_CMD_UPDATE:
3180 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3181 break;
3182 case SVGA_CMD_DEFINE_GMRFB:
3183 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3184 break;
3185 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3186 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3187 break;
3188 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3189 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3190 break;
3191 default:
3192 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3193 return -EINVAL;
3194 }
3195
3196 if (*size > size_remaining) {
3197 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3198 cmd_id);
3199 return -EINVAL;
3200 }
3201
3202 if (unlikely(!sw_context->kernel)) {
3203 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3204 return -EPERM;
3205 }
3206
3207 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3208 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3209
3210 return 0;
3211 }
3212
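/*
 * A sketch of what VMW_CMD_DEF() plausibly expands to, inferred from how
 * vmw_cmd_check() consumes the table below (the actual macro is defined
 * earlier in this file and may differ in detail):
 *
 *	#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
 *		[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),	\
 *					       (_gb_disable), (_gb_enable), #_cmd}
 *
 * Read each entry as: verifier function, then whether user-space may submit
 * the command, whether it is disallowed on guest-backed (GB) devices, and
 * whether it requires a guest-backed device.
 */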
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
};

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

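/*
 * Illustrative use of vmw_cmd_describe() (a sketch, not part of the
 * driver): naming each command while walking a raw stream, e.g. from a
 * debugfs dumper. 'buf' and 'left' are assumed locals.
 *
 *	const char *name;
 *	u32 size;
 *
 *	while (left >= sizeof(u32)) {
 *		if (!vmw_cmd_describe(buf, &size, &name) ||
 *		    !size || size > left)
 *			break;
 *		pr_info("cmd: %s (%u bytes)\n", name, size);
 *		buf += size;
 *		left -= size;
 *	}
 */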
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

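/*
 * Size accounting sketch (illustrative): with a 40-byte batch holding a
 * single 3D command whose header->size is 24, vmw_cmd_check() rewrites
 * size to 24 + sizeof(SVGA3dCmdHeader) = 32, leaving cur_size = 8 for the
 * next command. A stream that does not land exactly on cur_size == 0
 * trips the "Command verifier out of sync" error above.
 */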
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->mem.start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

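/*
 * Growth sketch (illustrative; assumes VMWGFX_CMD_BOUNCE_INIT_SIZE is
 * 32768 and 4 KiB pages): the bounce buffer grows by roughly 1.5x,
 * page-aligned, until it covers the request, so a 200 KiB batch would
 * step through 49152 -> 73728 -> 110592 -> 167936 -> 253952 bytes before
 * the single vmalloc() of the final size.
 */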
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the calling drm file private. Must be non-NULL
 * if @p_handle is non-NULL.
 * @dev_priv: Pointer to a device private structure.
 * @p_fence: Location to return the fence object. Set to NULL on failure.
 * @p_handle: If non-NULL, location to return a user-space fence handle.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a
 * user-space handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

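/*
 * Usage sketch (mirrors __vmw_execbuf_release_pinned_bo() later in this
 * file): a kernel-internal caller passes NULL for @file_priv and
 * @p_handle, and can fence buffers even on failure since *p_fence is
 * then NULL. 'val_ctx' is assumed to be a reserved validation context.
 *
 *	struct vmw_fence_obj *lfence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, NULL);
 *	vmw_validation_bo_fence(&val_ctx, lfence);
 *	if (lfence != NULL)
 *		vmw_fence_obj_unreference(&lfence);
 */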
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
					  TTM_REF_USAGE);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}

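/*
 * User-space counterpart sketch (illustrative, not from this file): the
 * -EFAULT preloading convention above means a submitting process would
 * typically do
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long)&rep;
 *	(submit the execbuf ioctl)
 *	if (rep.error != 0)
 *		(the kernel's copy_to_user() never landed; no usable fence)
 */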
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-space copy of the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value of
 * @kernel_commands on function call. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

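/*
 * Caller contract sketch (illustrative): the three outcomes of
 * vmw_execbuf_cmdbuf() as vmw_execbuf_process() below handles them:
 *
 *	cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
 *				  command_size, &header);
 *	if (IS_ERR(cmds))	(abort the submission)
 *	else if (header)	(cmds points into a command buffer)
 *	else			(cmds == kernel_commands, possibly NULL,
 *				 so fall back to the bounce-buffer path)
 */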
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;

		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM has correctly copied it. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;	/* Don't leak a reference on @in_fence. */

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}

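/*
 * User-space submission sketch (illustrative; field names follow
 * include/uapi/drm/vmwgfx_drm.h and the libdrm call is an assumption here;
 * error handling omitted):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long)cmd_buf,
 *		.command_size = cmd_bytes,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = SVGA3D_INVALID_ID,
 *		.fence_rep = (unsigned long)&rep,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */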