// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	unsigned int coherent_count;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 * @private: Optional additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
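
/*
 * Example (illustrative sketch, not part of the driver): allocating
 * per-validation scratch data that is reclaimed automatically when the
 * context is torn down. DECLARE_VAL_CONTEXT() is assumed to be the
 * context initializer from vmwgfx_validation.h, and struct my_scratch
 * is a hypothetical caller-private type.
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *	struct my_scratch *scratch;
 *
 *	scratch = vmw_validation_mem_alloc(&val_ctx, sizeof(*scratch));
 *	if (!scratch)
 *		return -ENOMEM;
 *
 * @scratch needs no explicit free; it is reclaimed together with the
 * context by vmw_validation_unref_lists(), _revert() or _done().
 */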

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: Reference counted resource pointer.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
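
/*
 * Example (illustrative sketch; obtaining @vbo is assumed caller context):
 * registering a buffer for GMR-suitable validation. Re-registering the
 * same buffer is cheap, but doing so with different as_mob/cpu_blit flags
 * fails with -EINVAL ("Inconsistent buffer usage").
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	if (ret)
 *		return ret;
 */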

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
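
/*
 * Example (illustrative sketch; struct my_priv and my_priv_init() are
 * hypothetical caller-private helpers): registering a resource with extra
 * per-node data returned through @p_node, and one-time setup keyed on
 * @first_usage.
 *
 *	struct my_priv *priv;
 *	bool first;
 *
 *	ret = vmw_validation_add_resource(&val_ctx, res, sizeof(*priv),
 *					  VMW_RES_DIRTY_NONE,
 *					  (void **)&priv, &first);
 *	if (ret)
 *		return ret;
 *	if (first)
 *		my_priv_init(priv);
 */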

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}
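
/*
 * Example (illustrative sketch): pointing a resource at a new backup MOB.
 * Since @vbo must already be registered with the context, the new buffer
 * is added as a MOB right before the switch; @priv is the @val_private
 * cookie returned by vmw_validation_add_resource().
 *
 *	ret = vmw_validation_add_bo(&val_ctx, new_vbo, true, false);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(&val_ctx, priv, new_vbo, 0);
 */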

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_backup && val->new_backup &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_backup);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->base.pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_buffer_object *vbo =
			container_of(entry->base.bo, typeof(*vbo), base);

		if (entry->cpu_blit) {
			struct ttm_operation_ctx ttm_ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ttm_ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocate the bo
		 * dirty tracker in resource_unreserve(), where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}
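
/*
 * Example (illustrative sketch of the overall submission flow; the
 * cmdbuf_mutex, the my_submit_commands() step and the fence creation are
 * assumed caller context):
 *
 *	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex,
 *				     true);
 *	if (ret) {
 *		vmw_validation_unref_lists(&val_ctx);
 *		return ret;
 *	}
 *
 *	ret = my_submit_commands(dev_priv);
 *	if (ret) {
 *		vmw_validation_revert(&val_ctx);
 *		return ret;
 *	}
 *
 *	vmw_validation_done(&val_ctx, fence);
 *
 * Note that vmw_validation_revert() and vmw_validation_done() already call
 * vmw_validation_unref_lists(); only a failed prepare leaves that to the
 * caller.
 */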

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_res().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_res() with the same or smaller @size is guaranteed not to
 * sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
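
/*
 * Example (illustrative sketch; the spinlock stands in for any atomic
 * section of the caller): preloading so that the following
 * vmw_validation_add_bo() is guaranteed not to sleep in the allocator.
 *
 *	ret = vmw_validation_preload_bo(&val_ctx);
 *	if (ret)
 *		return ret;
 *	spin_lock(&my_lock);
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	spin_unlock(&my_lock);
 */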

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context.
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_buffer_object *vbo =
				container_of(entry->base.bo, typeof(*vbo),
					     base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}