// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}
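
/*
 * Editorial note (not in the original source): both helpers above assert
 * that the backup buffer's reservation lock is held. An illustrative call
 * sequence, assuming @res already has a backup buffer, might be:
 *
 *	ttm_bo_reserve(&res->backup->base, false, false, NULL);
 *	vmw_resource_mob_attach(res);
 *	...
 *	vmw_resource_mob_detach(res);
 *	ttm_bo_unreserve(&res->backup->base);
 */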

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
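
/*
 * Editorial note: vmw_resource_reference_unless_doomed() is the lookup-safe
 * variant; it returns NULL if the refcount has already dropped to zero and
 * the resource is being torn down. A sketch of the intended pattern, where
 * @candidate is a hypothetical pointer obtained under a lock that keeps the
 * object's memory valid:
 *
 *	struct vmw_resource *res =
 *		vmw_resource_reference_unless_doomed(candidate);
 *
 *	if (!res)
 *		return;	// Resource is on its way out; don't touch it.
 */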

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
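
/*
 * Editorial note: vmw_resource_unreference() clears the caller's pointer,
 * so references are typically balanced like this (illustrative only):
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... use tmp ...
 *	vmw_resource_unreference(&tmp);	// tmp is NULL afterwards
 */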


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
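
/*
 * Editorial note on the pattern above: idr_preload(GFP_KERNEL) prefills the
 * per-cpu IDR cache while sleeping is still allowed, so the subsequent
 * idr_alloc(..., GFP_NOWAIT) can run under the resource_lock spinlock
 * without blocking. idr_alloc() returns the new id (>= 1 here) on success
 * or a negative errno, which this function maps to 0 / error respectively.
 */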

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
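
/*
 * Illustrative use of vmw_resource_init() for a hypothetical embedding
 * resource type (the names below are made up for the example):
 *
 *	struct my_resource {
 *		struct vmw_resource res;
 *		...
 *	};
 *
 *	static void my_res_free(struct vmw_resource *res)
 *	{
 *		kfree(container_of(res, struct my_resource, res));
 *	}
 *
 *	ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *				my_res_free, &my_res_func);
 */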


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
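
/*
 * Illustrative ioctl-path usage (editorial sketch; @handle comes from user
 * space and user_surface_converter is one of the converters defined by the
 * driver):
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use the refcounted res ...
 *	vmw_resource_unreference(&res);
 */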

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Unlike vmw_user_resource_lookup_handle(), this function does not take an
 * extra reference on the returned resource. It returns ERR_PTR(-ESRCH) if
 * the handle can't be found, and ERR_PTR(-EINVAL) if the handle is
 * associated with an incorrect resource type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
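
/*
 * Editorial note: since no reference is taken here, the returned pointer is
 * presumably only valid until the matching ttm_base_object_noref_release()
 * call (as seen in the error path above), so it should not be cached beyond
 * that point.
 */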

/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry;
 * on success, exactly one of them is set.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Pending GPU operation will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether to perform waits interruptibly if possible.
 * @no_backup:      Whether no backup buffer is needed for this resource.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
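
/*
 * Editorial sketch of the reserve/validate/unreserve lifecycle (simplified;
 * in the driver the validation code also reserves the backup buffer and
 * handles errors at each step):
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	...
 *	ret = vmw_resource_validate(res, true, true);
 *	... submit commands referencing res ...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */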

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list - Unbind the resources attached to a mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;


	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to perform waits interruptibly
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
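
/*
 * Illustrative pairing (editorial sketch): while pinned, a resource keeps
 * its id and is never put back on the eviction LRU:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... res->id is stable and the resource cannot be evicted ...
 *	vmw_resource_unpin(res);
 */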

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed prefaulting and fence the buffer
	 * object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_get_excl(bo->base.resv));
	}

	return 0;
}
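
/*
 * Illustrative caller-side sketch for vmw_resources_clean() (editorial;
 * @page_offset is a hypothetical mob page offset, e.g. from a page fault):
 * clean the single page at @page_offset and learn how many pages from there
 * are safe to prefault:
 *
 *	pgoff_t num_prefault;
 *	int ret = vmw_resources_clean(vbo, page_offset, page_offset + 1,
 *				      &num_prefault);
 */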