1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/sched/mm.h>
26 #include <drm/drm_gem.h>
27 
28 #include "display/intel_frontbuffer.h"
29 
30 #include "gem/i915_gem_lmem.h"
31 #include "gt/intel_engine.h"
32 #include "gt/intel_engine_heartbeat.h"
33 #include "gt/intel_gt.h"
34 #include "gt/intel_gt_requests.h"
35 
36 #include "i915_drv.h"
37 #include "i915_sw_fence_work.h"
38 #include "i915_trace.h"
39 #include "i915_vma.h"
40 
41 static struct kmem_cache *slab_vmas;
42 
43 struct i915_vma *i915_vma_alloc(void)
44 {
45 	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
46 }
47 
48 void i915_vma_free(struct i915_vma *vma)
49 {
50 	return kmem_cache_free(slab_vmas, vma);
51 }
52 
53 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
54 
55 #include <linux/stackdepot.h>
56 
57 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
58 {
59 	unsigned long *entries;
60 	unsigned int nr_entries;
61 	char buf[512];
62 
63 	if (!vma->node.stack) {
64 		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
65 				 vma->node.start, vma->node.size, reason);
66 		return;
67 	}
68 
69 	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
70 	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
71 	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
72 			 vma->node.start, vma->node.size, reason, buf);
73 }
74 
75 #else
76 
77 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
78 {
79 }
80 
81 #endif
82 
83 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
84 {
85 	return container_of(ref, typeof(struct i915_vma), active);
86 }
87 
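/*
 * Callbacks for the vma's i915_active tracker: acquiring the tracker takes
 * a reference on the vma, and retirement of the last request drops it
 * again, keeping the vma alive while it has requests in flight.
 */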
88 static int __i915_vma_active(struct i915_active *ref)
89 {
90 	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
91 }
92 
93 static void __i915_vma_retire(struct i915_active *ref)
94 {
95 	i915_vma_put(active_to_vma(ref));
96 }
97 
98 static struct i915_vma *
99 vma_create(struct drm_i915_gem_object *obj,
100 	   struct i915_address_space *vm,
101 	   const struct i915_ggtt_view *view)
102 {
103 	struct i915_vma *pos = ERR_PTR(-E2BIG);
104 	struct i915_vma *vma;
105 	struct rb_node *rb, **p;
106 
107 	/* The aliasing_ppgtt should never be used directly! */
108 	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
109 
110 	vma = i915_vma_alloc();
111 	if (vma == NULL)
112 		return ERR_PTR(-ENOMEM);
113 
114 	kref_init(&vma->ref);
115 	mutex_init(&vma->pages_mutex);
116 	vma->vm = i915_vm_get(vm);
117 	vma->ops = &vm->vma_ops;
118 	vma->obj = obj;
119 	vma->resv = obj->base.resv;
120 	vma->size = obj->base.size;
121 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
122 
123 	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
124 
125 	/* Declare ourselves safe for use inside shrinkers */
126 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
127 		fs_reclaim_acquire(GFP_KERNEL);
128 		might_lock(&vma->active.mutex);
129 		fs_reclaim_release(GFP_KERNEL);
130 	}
131 
132 	INIT_LIST_HEAD(&vma->closed_link);
133 
134 	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
135 		vma->ggtt_view = *view;
136 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
137 			GEM_BUG_ON(range_overflows_t(u64,
138 						     view->partial.offset,
139 						     view->partial.size,
140 						     obj->base.size >> PAGE_SHIFT));
141 			vma->size = view->partial.size;
142 			vma->size <<= PAGE_SHIFT;
143 			GEM_BUG_ON(vma->size > obj->base.size);
144 		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
145 			vma->size = intel_rotation_info_size(&view->rotated);
146 			vma->size <<= PAGE_SHIFT;
147 		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
148 			vma->size = intel_remapped_info_size(&view->remapped);
149 			vma->size <<= PAGE_SHIFT;
150 		}
151 	}
152 
153 	if (unlikely(vma->size > vm->total))
154 		goto err_vma;
155 
156 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
157 
158 	spin_lock(&obj->vma.lock);
159 
160 	if (i915_is_ggtt(vm)) {
161 		if (unlikely(overflows_type(vma->size, u32)))
162 			goto err_unlock;
163 
164 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
165 						      i915_gem_object_get_tiling(obj),
166 						      i915_gem_object_get_stride(obj));
167 		if (unlikely(vma->fence_size < vma->size || /* overflow */
168 			     vma->fence_size > vm->total))
169 			goto err_unlock;
170 
171 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
172 
173 		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
174 								i915_gem_object_get_tiling(obj),
175 								i915_gem_object_get_stride(obj));
176 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
177 
178 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
179 	}
180 
181 	rb = NULL;
182 	p = &obj->vma.tree.rb_node;
183 	while (*p) {
184 		long cmp;
185 
186 		rb = *p;
187 		pos = rb_entry(rb, struct i915_vma, obj_node);
188 
189 		/*
190 		 * If the view already exists in the tree, another thread
191 		 * already created a matching vma, so return the older instance
192 		 * and dispose of ours.
193 		 */
194 		cmp = i915_vma_compare(pos, vm, view);
195 		if (cmp < 0)
196 			p = &rb->rb_right;
197 		else if (cmp > 0)
198 			p = &rb->rb_left;
199 		else
200 			goto err_unlock;
201 	}
202 	rb_link_node(&vma->obj_node, rb, p);
203 	rb_insert_color(&vma->obj_node, &obj->vma.tree);
204 
205 	if (i915_vma_is_ggtt(vma))
206 		/*
207 		 * We put the GGTT vma at the start of the vma-list, followed
208 		 * by the ppGTT vma. This allows us to break early when
209 		 * iterating over only the GGTT vma for an object, see
210 		 * for_each_ggtt_vma()
211 		 */
212 		list_add(&vma->obj_link, &obj->vma.list);
213 	else
214 		list_add_tail(&vma->obj_link, &obj->vma.list);
215 
216 	spin_unlock(&obj->vma.lock);
217 
218 	return vma;
219 
220 err_unlock:
221 	spin_unlock(&obj->vma.lock);
222 err_vma:
223 	i915_vm_put(vm);
224 	i915_vma_free(vma);
225 	return pos;
226 }
227 
228 static struct i915_vma *
229 i915_vma_lookup(struct drm_i915_gem_object *obj,
230 	   struct i915_address_space *vm,
231 	   const struct i915_ggtt_view *view)
232 {
233 	struct rb_node *rb;
234 
235 	rb = obj->vma.tree.rb_node;
236 	while (rb) {
237 		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
238 		long cmp;
239 
240 		cmp = i915_vma_compare(vma, vm, view);
241 		if (cmp == 0)
242 			return vma;
243 
244 		if (cmp < 0)
245 			rb = rb->rb_right;
246 		else
247 			rb = rb->rb_left;
248 	}
249 
250 	return NULL;
251 }
252 
253 /**
254  * i915_vma_instance - return the singleton instance of the VMA
255  * @obj: parent &struct drm_i915_gem_object to be mapped
256  * @vm: address space in which the mapping is located
257  * @view: additional mapping requirements
258  *
259  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
260  * the same @view characteristics. If a match is not found, one is created.
261  * Once created, the VMA is kept until either the object is freed, or the
262  * address space is closed.
263  *
264  * Returns the vma, or an error pointer.
265  */
266 struct i915_vma *
267 i915_vma_instance(struct drm_i915_gem_object *obj,
268 		  struct i915_address_space *vm,
269 		  const struct i915_ggtt_view *view)
270 {
271 	struct i915_vma *vma;
272 
273 	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
274 	GEM_BUG_ON(!atomic_read(&vm->open));
275 
276 	spin_lock(&obj->vma.lock);
277 	vma = i915_vma_lookup(obj, vm, view);
278 	spin_unlock(&obj->vma.lock);
279 
280 	/* vma_create() will resolve the race if another creates the vma */
281 	if (unlikely(!vma))
282 		vma = vma_create(obj, vm, view);
283 
284 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
285 	return vma;
286 }
287 
288 struct i915_vma_work {
289 	struct dma_fence_work base;
290 	struct i915_address_space *vm;
291 	struct i915_vm_pt_stash stash;
292 	struct i915_vma *vma;
293 	struct drm_i915_gem_object *pinned;
294 	struct i915_sw_dma_fence_cb cb;
295 	enum i915_cache_level cache_level;
296 	unsigned int flags;
297 };
298 
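/*
 * Worker callbacks for the asynchronous bind: __vma_bind() runs from the
 * dma_fence_work to write the PTEs, while __vma_release() drops the
 * temporary references (pinned pages, PT stash, vm) taken when the bind
 * was queued.
 */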
299 static void __vma_bind(struct dma_fence_work *work)
300 {
301 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
302 	struct i915_vma *vma = vw->vma;
303 
304 	vma->ops->bind_vma(vw->vm, &vw->stash,
305 			   vma, vw->cache_level, vw->flags);
306 }
307 
308 static void __vma_release(struct dma_fence_work *work)
309 {
310 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
311 
312 	if (vw->pinned) {
313 		__i915_gem_object_unpin_pages(vw->pinned);
314 		i915_gem_object_put(vw->pinned);
315 	}
316 
317 	i915_vm_free_pt_stash(vw->vm, &vw->stash);
318 	i915_vm_put(vw->vm);
319 }
320 
321 static const struct dma_fence_work_ops bind_ops = {
322 	.name = "bind",
323 	.work = __vma_bind,
324 	.release = __vma_release,
325 };
326 
327 struct i915_vma_work *i915_vma_work(void)
328 {
329 	struct i915_vma_work *vw;
330 
331 	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
332 	if (!vw)
333 		return NULL;
334 
335 	dma_fence_work_init(&vw->base, &bind_ops);
336 	vw->base.dma.error = -EAGAIN; /* disable the worker by default */
337 
338 	return vw;
339 }
340 
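/*
 * Wait for a pending (possibly asynchronous) bind to complete by waiting
 * on the exclusive fence installed on the vma's active tracker, if any.
 */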
341 int i915_vma_wait_for_bind(struct i915_vma *vma)
342 {
343 	int err = 0;
344 
345 	if (rcu_access_pointer(vma->active.excl.fence)) {
346 		struct dma_fence *fence;
347 
348 		rcu_read_lock();
349 		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
350 		rcu_read_unlock();
351 		if (fence) {
352 			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
353 			dma_fence_put(fence);
354 		}
355 	}
356 
357 	return err;
358 }
359 
360 /**
361  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
362  * @vma: VMA to map
363  * @cache_level: mapping cache level
364  * @flags: flags like global or local mapping
365  * @work: preallocated worker for allocating and binding the PTE
366  *
367  * DMA addresses are taken from the scatter-gather table of this object (or of
368  * this VMA in case of non-default GGTT views) and PTE entries set up.
369  * Note that DMA addresses are also the only part of the SG table we care about.
370  */
371 int i915_vma_bind(struct i915_vma *vma,
372 		  enum i915_cache_level cache_level,
373 		  u32 flags,
374 		  struct i915_vma_work *work)
375 {
376 	u32 bind_flags;
377 	u32 vma_flags;
378 
379 	lockdep_assert_held(&vma->vm->mutex);
380 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
381 	GEM_BUG_ON(vma->size > vma->node.size);
382 
383 	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
384 					      vma->node.size,
385 					      vma->vm->total)))
386 		return -ENODEV;
387 
388 	if (GEM_DEBUG_WARN_ON(!flags))
389 		return -EINVAL;
390 
391 	bind_flags = flags;
392 	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
393 
394 	vma_flags = atomic_read(&vma->flags);
395 	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
396 
397 	bind_flags &= ~vma_flags;
398 	if (bind_flags == 0)
399 		return 0;
400 
401 	GEM_BUG_ON(!vma->pages);
402 
403 	trace_i915_vma_bind(vma, bind_flags);
404 	if (work && bind_flags & vma->vm->bind_async_flags) {
405 		struct dma_fence *prev;
406 
407 		work->vma = vma;
408 		work->cache_level = cache_level;
409 		work->flags = bind_flags;
410 
411 		/*
412 		 * Note we only want to chain up to the migration fence on
413 		 * the pages (not the object itself). As we don't track that,
414 		 * yet, we have to use the exclusive fence instead.
415 		 *
416 		 * Also note that we do not want to track the async vma as
417 		 * part of the obj->resv->excl_fence as it only affects
418 		 * execution and not content or object's backing store lifetime.
419 		 */
420 		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
421 		if (prev) {
422 			__i915_sw_fence_await_dma_fence(&work->base.chain,
423 							prev,
424 							&work->cb);
425 			dma_fence_put(prev);
426 		}
427 
428 		work->base.dma.error = 0; /* enable the queue_work() */
429 
430 		if (vma->obj) {
431 			__i915_gem_object_pin_pages(vma->obj);
432 			work->pinned = i915_gem_object_get(vma->obj);
433 		}
434 	} else {
435 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
436 	}
437 
438 	if (vma->obj)
439 		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
440 
441 	atomic_or(bind_flags, &vma->flags);
442 	return 0;
443 }
444 
445 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
446 {
447 	void __iomem *ptr;
448 	int err;
449 
450 	if (!i915_gem_object_is_lmem(vma->obj)) {
451 		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
452 			err = -ENODEV;
453 			goto err;
454 		}
455 	}
456 
457 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
458 	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
459 
460 	ptr = READ_ONCE(vma->iomap);
461 	if (ptr == NULL) {
462 		/*
463 		 * TODO: consider just using i915_gem_object_pin_map() for lmem
464 		 * instead, which already supports mapping non-contiguous chunks
465 		 * of pages, that way we can also drop the
466 		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
467 		 */
468 		if (i915_gem_object_is_lmem(vma->obj))
469 			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
470 							  vma->obj->base.size);
471 		else
472 			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
473 						vma->node.start,
474 						vma->node.size);
475 		if (ptr == NULL) {
476 			err = -ENOMEM;
477 			goto err;
478 		}
479 
480 		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
481 			io_mapping_unmap(ptr);
482 			ptr = vma->iomap;
483 		}
484 	}
485 
486 	__i915_vma_pin(vma);
487 
488 	err = i915_vma_pin_fence(vma);
489 	if (err)
490 		goto err_unpin;
491 
492 	i915_vma_set_ggtt_write(vma);
493 
494 	/* NB Access through the GTT requires the device to be awake. */
495 	return ptr;
496 
497 err_unpin:
498 	__i915_vma_unpin(vma);
499 err:
500 	return IO_ERR_PTR(err);
501 }
502 
503 void i915_vma_flush_writes(struct i915_vma *vma)
504 {
505 	if (i915_vma_unset_ggtt_write(vma))
506 		intel_gt_flush_ggtt_writes(vma->vm->gt);
507 }
508 
509 void i915_vma_unpin_iomap(struct i915_vma *vma)
510 {
511 	GEM_BUG_ON(vma->iomap == NULL);
512 
513 	i915_vma_flush_writes(vma);
514 
515 	i915_vma_unpin_fence(vma);
516 	i915_vma_unpin(vma);
517 }
518 
519 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
520 {
521 	struct i915_vma *vma;
522 	struct drm_i915_gem_object *obj;
523 
524 	vma = fetch_and_zero(p_vma);
525 	if (!vma)
526 		return;
527 
528 	obj = vma->obj;
529 	GEM_BUG_ON(!obj);
530 
531 	i915_vma_unpin(vma);
532 
533 	if (flags & I915_VMA_RELEASE_MAP)
534 		i915_gem_object_unpin_map(obj);
535 
536 	i915_gem_object_put(obj);
537 }
538 
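/*
 * Check whether the vma's current placement satisfies the requested size,
 * alignment and PIN_* constraints; if it does not, callers know the vma
 * has to be unbound and rebound before it can be reused.
 */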
539 bool i915_vma_misplaced(const struct i915_vma *vma,
540 			u64 size, u64 alignment, u64 flags)
541 {
542 	if (!drm_mm_node_allocated(&vma->node))
543 		return false;
544 
545 	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
546 		return true;
547 
548 	if (vma->node.size < size)
549 		return true;
550 
551 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
552 	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
553 		return true;
554 
555 	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
556 		return true;
557 
558 	if (flags & PIN_OFFSET_BIAS &&
559 	    vma->node.start < (flags & PIN_OFFSET_MASK))
560 		return true;
561 
562 	if (flags & PIN_OFFSET_FIXED &&
563 	    vma->node.start != (flags & PIN_OFFSET_MASK))
564 		return true;
565 
566 	return false;
567 }
568 
569 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
570 {
571 	bool mappable, fenceable;
572 
573 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
574 	GEM_BUG_ON(!vma->fence_size);
575 
576 	fenceable = (vma->node.size >= vma->fence_size &&
577 		     IS_ALIGNED(vma->node.start, vma->fence_alignment));
578 
579 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
580 
581 	if (mappable && fenceable)
582 		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
583 	else
584 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
585 }
586 
587 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
588 {
589 	struct drm_mm_node *node = &vma->node;
590 	struct drm_mm_node *other;
591 
592 	/*
593 	 * On some machines we have to be careful when putting differing types
594 	 * of snoopable memory together to avoid the prefetcher crossing memory
595 	 * domains and dying. During vm initialisation, we decide whether or not
596 	 * these constraints apply and set the drm_mm.color_adjust
597 	 * appropriately.
598 	 */
599 	if (!i915_vm_has_cache_coloring(vma->vm))
600 		return true;
601 
602 	/* Only valid to be called on an already inserted vma */
603 	GEM_BUG_ON(!drm_mm_node_allocated(node));
604 	GEM_BUG_ON(list_empty(&node->node_list));
605 
606 	other = list_prev_entry(node, node_list);
607 	if (i915_node_color_differs(other, color) &&
608 	    !drm_mm_hole_follows(other))
609 		return false;
610 
611 	other = list_next_entry(node, node_list);
612 	if (i915_node_color_differs(other, color) &&
613 	    !drm_mm_hole_follows(node))
614 		return false;
615 
616 	return true;
617 }
618 
619 /**
620  * i915_vma_insert - finds a slot for the vma in its address space
621  * @vma: the vma
622  * @size: requested size in bytes (can be larger than the VMA)
623  * @alignment: required alignment
624  * @flags: mask of PIN_* flags to use
625  *
626  * First we try to allocate some free space that meets the requirements for
627  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
628  * preferably the oldest idle entry, to make room for the new VMA.
629  *
630  * Returns:
631  * 0 on success, negative error code otherwise.
632  */
633 static int
634 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
635 {
636 	unsigned long color;
637 	u64 start, end;
638 	int ret;
639 
640 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
641 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
642 
643 	size = max(size, vma->size);
644 	alignment = max(alignment, vma->display_alignment);
645 	if (flags & PIN_MAPPABLE) {
646 		size = max_t(typeof(size), size, vma->fence_size);
647 		alignment = max_t(typeof(alignment),
648 				  alignment, vma->fence_alignment);
649 	}
650 
651 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
652 	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
653 	GEM_BUG_ON(!is_power_of_2(alignment));
654 
655 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
656 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
657 
658 	end = vma->vm->total;
659 	if (flags & PIN_MAPPABLE)
660 		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
661 	if (flags & PIN_ZONE_4G)
662 		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
663 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
664 
665 	/* If binding the object/GGTT view requires more space than the entire
666 	 * aperture has, reject it early before evicting everything in a vain
667 	 * attempt to find space.
668 	 */
669 	if (size > end) {
670 		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
671 			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
672 			  end);
673 		return -ENOSPC;
674 	}
675 
676 	color = 0;
677 	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
678 		color = vma->obj->cache_level;
679 
680 	if (flags & PIN_OFFSET_FIXED) {
681 		u64 offset = flags & PIN_OFFSET_MASK;
682 		if (!IS_ALIGNED(offset, alignment) ||
683 		    range_overflows(offset, size, end))
684 			return -EINVAL;
685 
686 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
687 					   size, offset, color,
688 					   flags);
689 		if (ret)
690 			return ret;
691 	} else {
692 		/*
693 		 * We only support huge gtt pages through the 48b PPGTT,
694 		 * however we also don't want to force any alignment for
695 		 * objects which need to be tightly packed into the low 32bits.
696 		 *
697 		 * Note that we assume that the GGTT is limited to 4GiB for the
698 		 * foreseeable future. See also i915_ggtt_offset().
699 		 */
700 		if (upper_32_bits(end - 1) &&
701 		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
702 			/*
703 			 * We can't mix 64K and 4K PTEs in the same page-table
704 			 * (2M block), and so to avoid the ugliness and
705 			 * complexity of coloring we opt for just aligning 64K
706 			 * objects to 2M.
707 			 */
708 			u64 page_alignment =
709 				rounddown_pow_of_two(vma->page_sizes.sg |
710 						     I915_GTT_PAGE_SIZE_2M);
711 
712 			/*
713 			 * Check we don't expand for the limited Global GTT
714 			 * (mappable aperture is even more precious!). This
715 			 * also checks that we exclude the aliasing-ppgtt.
716 			 */
717 			GEM_BUG_ON(i915_vma_is_ggtt(vma));
718 
719 			alignment = max(alignment, page_alignment);
720 
721 			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
722 				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
723 		}
724 
725 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
726 					  size, alignment, color,
727 					  start, end, flags);
728 		if (ret)
729 			return ret;
730 
731 		GEM_BUG_ON(vma->node.start < start);
732 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
733 	}
734 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
735 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
736 
737 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
738 
739 	return 0;
740 }
741 
742 static void
743 i915_vma_detach(struct i915_vma *vma)
744 {
745 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
746 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
747 
748 	/*
749 	 * And finally now the object is completely decoupled from this
750 	 * vma, we can drop its hold on the backing storage and allow
751 	 * it to be reaped by the shrinker.
752 	 */
753 	list_del(&vma->vm_link);
754 }
755 
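/*
 * "Quick and dirty" pin: try to take a pin without vm->mutex by bumping
 * the pin count with a cmpxchg loop, provided the vma is already bound
 * with (at least) the requested flags and is not marked as in error. If
 * the vma is bound but currently unpinned, retry the check under
 * vm->mutex to avoid racing against a concurrent i915_vma_unbind().
 */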
756 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
757 {
758 	unsigned int bound;
759 	bool pinned = true;
760 
761 	bound = atomic_read(&vma->flags);
762 	do {
763 		if (unlikely(flags & ~bound))
764 			return false;
765 
766 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
767 			return false;
768 
769 		if (!(bound & I915_VMA_PIN_MASK))
770 			goto unpinned;
771 
772 		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
773 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
774 
775 	return true;
776 
777 unpinned:
778 	/*
779 	 * If pin_count==0, but we are bound, check under the lock to avoid
780 	 * racing with a concurrent i915_vma_unbind().
781 	 */
782 	mutex_lock(&vma->vm->mutex);
783 	do {
784 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
785 			pinned = false;
786 			break;
787 		}
788 
789 		if (unlikely(flags & ~bound)) {
790 			pinned = false;
791 			break;
792 		}
793 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
794 	mutex_unlock(&vma->vm->mutex);
795 
796 	return pinned;
797 }
798 
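/*
 * vma->pages_count is a split reference count: the low bits track
 * vma_get_pages() callers, while the bits above I915_VMA_PAGES_BIAS track
 * live bindings (see vma_unbind_pages()). The vma's page description is
 * only torn down once the whole counter reaches zero.
 */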
799 static int vma_get_pages(struct i915_vma *vma)
800 {
801 	int err = 0;
802 	bool pinned_pages = false;
803 
804 	if (atomic_add_unless(&vma->pages_count, 1, 0))
805 		return 0;
806 
807 	if (vma->obj) {
808 		err = i915_gem_object_pin_pages(vma->obj);
809 		if (err)
810 			return err;
811 		pinned_pages = true;
812 	}
813 
814 	/* Allocations ahoy! */
815 	if (mutex_lock_interruptible(&vma->pages_mutex)) {
816 		err = -EINTR;
817 		goto unpin;
818 	}
819 
820 	if (!atomic_read(&vma->pages_count)) {
821 		err = vma->ops->set_pages(vma);
822 		if (err)
823 			goto unlock;
824 		pinned_pages = false;
825 	}
826 	atomic_inc(&vma->pages_count);
827 
828 unlock:
829 	mutex_unlock(&vma->pages_mutex);
830 unpin:
831 	if (pinned_pages)
832 		__i915_gem_object_unpin_pages(vma->obj);
833 
834 	return err;
835 }
836 
837 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
838 {
839 	/* We allocate under vma_get_pages, so beware the shrinker */
840 	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
841 	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
842 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
843 		vma->ops->clear_pages(vma);
844 		GEM_BUG_ON(vma->pages);
845 		if (vma->obj)
846 			i915_gem_object_unpin_pages(vma->obj);
847 	}
848 	mutex_unlock(&vma->pages_mutex);
849 }
850 
851 static void vma_put_pages(struct i915_vma *vma)
852 {
853 	if (atomic_add_unless(&vma->pages_count, -1, 1))
854 		return;
855 
856 	__vma_put_pages(vma, 1);
857 }
858 
859 static void vma_unbind_pages(struct i915_vma *vma)
860 {
861 	unsigned int count;
862 
863 	lockdep_assert_held(&vma->vm->mutex);
864 
865 	/* The upper portion of pages_count is the number of bindings */
866 	count = atomic_read(&vma->pages_count);
867 	count >>= I915_VMA_PAGES_BIAS;
868 	GEM_BUG_ON(!count);
869 
870 	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
871 }
872 
873 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
874 		    u64 size, u64 alignment, u64 flags)
875 {
876 	struct i915_vma_work *work = NULL;
877 	intel_wakeref_t wakeref = 0;
878 	unsigned int bound;
879 	int err;
880 
881 #ifdef CONFIG_PROVE_LOCKING
882 	if (debug_locks && !WARN_ON(!ww) && vma->resv)
883 		assert_vma_held(vma);
884 #endif
885 
886 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
887 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
888 
889 	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
890 
891 	/* First try and grab the pin without rebinding the vma */
892 	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
893 		return 0;
894 
895 	err = vma_get_pages(vma);
896 	if (err)
897 		return err;
898 
899 	if (flags & PIN_GLOBAL)
900 		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
901 
902 	if (flags & vma->vm->bind_async_flags) {
903 		/* lock VM */
904 		err = i915_vm_lock_objects(vma->vm, ww);
905 		if (err)
906 			goto err_rpm;
907 
908 		work = i915_vma_work();
909 		if (!work) {
910 			err = -ENOMEM;
911 			goto err_rpm;
912 		}
913 
914 		work->vm = i915_vm_get(vma->vm);
915 
916 		/* Allocate enough page directories to cover the PTEs in use */
917 		if (vma->vm->allocate_va_range) {
918 			err = i915_vm_alloc_pt_stash(vma->vm,
919 						     &work->stash,
920 						     vma->size);
921 			if (err)
922 				goto err_fence;
923 
924 			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
925 			if (err)
926 				goto err_fence;
927 		}
928 	}
929 
930 	/*
931 	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
932 	 *
933 	 * We conflate the Global GTT with the user's vma when using the
934 	 * aliasing-ppgtt, but it is still vitally important to try and
935 	 * keep the use cases distinct. For example, userptr objects are
936 	 * not allowed inside the Global GTT as that will cause lock
937 	 * inversions when we have to evict them the mmu_notifier callbacks -
938 	 * but they are allowed to be part of the user ppGTT which can never
939 	 * be mapped. As such we try to give the distinct users of the same
940 	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
941 	 * and i915_ppgtt separate].
942 	 *
943 	 * NB this may cause us to mask real lock inversions -- while the
944 	 * code is safe today, lockdep may not be able to spot future
945 	 * transgressions.
946 	 */
947 	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
948 					      !(flags & PIN_GLOBAL));
949 	if (err)
950 		goto err_fence;
951 
952 	/* No more allocations allowed now we hold vm->mutex */
953 
954 	if (unlikely(i915_vma_is_closed(vma))) {
955 		err = -ENOENT;
956 		goto err_unlock;
957 	}
958 
959 	bound = atomic_read(&vma->flags);
960 	if (unlikely(bound & I915_VMA_ERROR)) {
961 		err = -ENOMEM;
962 		goto err_unlock;
963 	}
964 
965 	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
966 		err = -EAGAIN; /* pins are meant to be fairly temporary */
967 		goto err_unlock;
968 	}
969 
970 	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
971 		__i915_vma_pin(vma);
972 		goto err_unlock;
973 	}
974 
975 	err = i915_active_acquire(&vma->active);
976 	if (err)
977 		goto err_unlock;
978 
979 	if (!(bound & I915_VMA_BIND_MASK)) {
980 		err = i915_vma_insert(vma, size, alignment, flags);
981 		if (err)
982 			goto err_active;
983 
984 		if (i915_is_ggtt(vma->vm))
985 			__i915_vma_set_map_and_fenceable(vma);
986 	}
987 
988 	GEM_BUG_ON(!vma->pages);
989 	err = i915_vma_bind(vma,
990 			    vma->obj ? vma->obj->cache_level : 0,
991 			    flags, work);
992 	if (err)
993 		goto err_remove;
994 
995 	/* There should only be at most 2 active bindings (user, global) */
996 	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
997 	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
998 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
999 
1000 	__i915_vma_pin(vma);
1001 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
1002 	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1003 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1004 
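	/*
	 * Note the success path also falls through the labels below: they
	 * double as the common unwind, releasing the active reference,
	 * dropping vm->mutex, committing the preallocated bind worker (a
	 * no-op unless it was enabled in i915_vma_bind()) and putting the
	 * temporary wakeref and page references.
	 */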
1005 err_remove:
1006 	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1007 		i915_vma_detach(vma);
1008 		drm_mm_remove_node(&vma->node);
1009 	}
1010 err_active:
1011 	i915_active_release(&vma->active);
1012 err_unlock:
1013 	mutex_unlock(&vma->vm->mutex);
1014 err_fence:
1015 	if (work)
1016 		dma_fence_work_commit_imm(&work->base);
1017 err_rpm:
1018 	if (wakeref)
1019 		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1020 	vma_put_pages(vma);
1021 	return err;
1022 }
1023 
1024 static void flush_idle_contexts(struct intel_gt *gt)
1025 {
1026 	struct intel_engine_cs *engine;
1027 	enum intel_engine_id id;
1028 
1029 	for_each_engine(engine, gt, id)
1030 		intel_engine_flush_barriers(engine);
1031 
1032 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1033 }
1034 
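/*
 * Pin a vma into the GGTT; as long as the failure is -ENOSPC, flush
 * engine barriers, wait for the GT to idle and evict the address space
 * before retrying, rather than reporting the failure to the caller.
 */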
1035 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1036 		  u32 align, unsigned int flags)
1037 {
1038 	struct i915_address_space *vm = vma->vm;
1039 	int err;
1040 
1041 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1042 
1043 #ifdef CONFIG_LOCKDEP
1044 	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
1045 #endif
1046 
1047 	do {
1048 		if (ww)
1049 			err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1050 		else
1051 			err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
1052 		if (err != -ENOSPC) {
1053 			if (!err) {
1054 				err = i915_vma_wait_for_bind(vma);
1055 				if (err)
1056 					i915_vma_unpin(vma);
1057 			}
1058 			return err;
1059 		}
1060 
1061 		/* Unlike i915_vma_pin, we don't take no for an answer! */
1062 		flush_idle_contexts(vm->gt);
1063 		if (mutex_lock_interruptible(&vm->mutex) == 0) {
1064 			i915_gem_evict_vm(vm);
1065 			mutex_unlock(&vm->mutex);
1066 		}
1067 	} while (1);
1068 }
1069 
1070 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1071 {
1072 	/*
1073 	 * We defer actually closing, unbinding and destroying the VMA until
1074 	 * the next idle point, or if the object is freed in the meantime. By
1075 	 * postponing the unbind, we allow for it to be resurrected by the
1076 	 * client, avoiding the work required to rebind the VMA. This is
1077 	 * advantageous for DRI, where the client/server pass objects
1078 	 * between themselves, temporarily opening a local VMA to the
1079 	 * object, and then closing it again. The same object is then reused
1080 	 * on the next frame (or two, depending on the depth of the swap queue)
1081 	 * causing us to rebind the VMA once more. This ends up being a lot
1082 	 * of wasted work for the steady state.
1083 	 */
1084 	GEM_BUG_ON(i915_vma_is_closed(vma));
1085 	list_add(&vma->closed_link, &gt->closed_vma);
1086 }
1087 
1088 void i915_vma_close(struct i915_vma *vma)
1089 {
1090 	struct intel_gt *gt = vma->vm->gt;
1091 	unsigned long flags;
1092 
1093 	if (i915_vma_is_ggtt(vma))
1094 		return;
1095 
1096 	GEM_BUG_ON(!atomic_read(&vma->open_count));
1097 	if (atomic_dec_and_lock_irqsave(&vma->open_count,
1098 					&gt->closed_lock,
1099 					flags)) {
1100 		__vma_close(vma, gt);
1101 		spin_unlock_irqrestore(&gt->closed_lock, flags);
1102 	}
1103 }
1104 
1105 static void __i915_vma_remove_closed(struct i915_vma *vma)
1106 {
1107 	struct intel_gt *gt = vma->vm->gt;
1108 
1109 	spin_lock_irq(&gt->closed_lock);
1110 	list_del_init(&vma->closed_link);
1111 	spin_unlock_irq(&gt->closed_lock);
1112 }
1113 
1114 void i915_vma_reopen(struct i915_vma *vma)
1115 {
1116 	if (i915_vma_is_closed(vma))
1117 		__i915_vma_remove_closed(vma);
1118 }
1119 
1120 void i915_vma_release(struct kref *ref)
1121 {
1122 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1123 
1124 	if (drm_mm_node_allocated(&vma->node)) {
1125 		mutex_lock(&vma->vm->mutex);
1126 		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1127 		WARN_ON(__i915_vma_unbind(vma));
1128 		mutex_unlock(&vma->vm->mutex);
1129 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1130 	}
1131 	GEM_BUG_ON(i915_vma_is_active(vma));
1132 
1133 	if (vma->obj) {
1134 		struct drm_i915_gem_object *obj = vma->obj;
1135 
1136 		spin_lock(&obj->vma.lock);
1137 		list_del(&vma->obj_link);
1138 		if (!RB_EMPTY_NODE(&vma->obj_node))
1139 			rb_erase(&vma->obj_node, &obj->vma.tree);
1140 		spin_unlock(&obj->vma.lock);
1141 	}
1142 
1143 	__i915_vma_remove_closed(vma);
1144 	i915_vm_put(vma->vm);
1145 
1146 	i915_active_fini(&vma->active);
1147 	i915_vma_free(vma);
1148 }
1149 
1150 void i915_vma_parked(struct intel_gt *gt)
1151 {
1152 	struct i915_vma *vma, *next;
1153 	LIST_HEAD(closed);
1154 
1155 	spin_lock_irq(&gt->closed_lock);
1156 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1157 		struct drm_i915_gem_object *obj = vma->obj;
1158 		struct i915_address_space *vm = vma->vm;
1159 
1160 		/* XXX All to avoid keeping a reference on i915_vma itself */
1161 
1162 		if (!kref_get_unless_zero(&obj->base.refcount))
1163 			continue;
1164 
1165 		if (!i915_vm_tryopen(vm)) {
1166 			i915_gem_object_put(obj);
1167 			continue;
1168 		}
1169 
1170 		list_move(&vma->closed_link, &closed);
1171 	}
1172 	spin_unlock_irq(&gt->closed_lock);
1173 
1174 	/* As the GT is held idle, no vma can be reopened as we destroy them */
1175 	list_for_each_entry_safe(vma, next, &closed, closed_link) {
1176 		struct drm_i915_gem_object *obj = vma->obj;
1177 		struct i915_address_space *vm = vma->vm;
1178 
1179 		INIT_LIST_HEAD(&vma->closed_link);
1180 		__i915_vma_put(vma);
1181 
1182 		i915_gem_object_put(obj);
1183 		i915_vm_close(vm);
1184 	}
1185 }
1186 
1187 static void __i915_vma_iounmap(struct i915_vma *vma)
1188 {
1189 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1190 
1191 	if (vma->iomap == NULL)
1192 		return;
1193 
1194 	io_mapping_unmap(vma->iomap);
1195 	vma->iomap = NULL;
1196 }
1197 
1198 void i915_vma_revoke_mmap(struct i915_vma *vma)
1199 {
1200 	struct drm_vma_offset_node *node;
1201 	u64 vma_offset;
1202 
1203 	if (!i915_vma_has_userfault(vma))
1204 		return;
1205 
1206 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1207 	GEM_BUG_ON(!vma->obj->userfault_count);
1208 
1209 	node = &vma->mmo->vma_node;
1210 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1211 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1212 			    drm_vma_node_offset_addr(node) + vma_offset,
1213 			    vma->size,
1214 			    1);
1215 
1216 	i915_vma_unset_userfault(vma);
1217 	if (!--vma->obj->userfault_count)
1218 		list_del(&vma->obj->userfault_link);
1219 }
1220 
1221 static int
1222 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1223 {
1224 	return __i915_request_await_exclusive(rq, &vma->active);
1225 }
1226 
1227 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1228 {
1229 	int err;
1230 
1231 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
1232 
1233 	/* Wait for the vma to be bound before we start! */
1234 	err = __i915_request_await_bind(rq, vma);
1235 	if (err)
1236 		return err;
1237 
1238 	return i915_active_add_request(&vma->active, rq);
1239 }
1240 
1241 int i915_vma_move_to_active(struct i915_vma *vma,
1242 			    struct i915_request *rq,
1243 			    unsigned int flags)
1244 {
1245 	struct drm_i915_gem_object *obj = vma->obj;
1246 	int err;
1247 
1248 	assert_object_held(obj);
1249 
1250 	err = __i915_vma_move_to_active(vma, rq);
1251 	if (unlikely(err))
1252 		return err;
1253 
1254 	if (flags & EXEC_OBJECT_WRITE) {
1255 		struct intel_frontbuffer *front;
1256 
1257 		front = __intel_frontbuffer_get(obj);
1258 		if (unlikely(front)) {
1259 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1260 				i915_active_add_request(&front->write, rq);
1261 			intel_frontbuffer_put(front);
1262 		}
1263 
1264 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
1265 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
1266 		obj->read_domains = 0;
1267 	} else {
1268 		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
1269 			err = dma_resv_reserve_shared(vma->resv, 1);
1270 			if (unlikely(err))
1271 				return err;
1272 		}
1273 
1274 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
1275 		obj->write_domain = 0;
1276 	}
1277 
1278 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1279 		i915_active_add_request(&vma->fence->active, rq);
1280 
1281 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
1282 	obj->mm.dirty = true;
1283 
1284 	GEM_BUG_ON(!i915_vma_is_active(vma));
1285 	return 0;
1286 }
1287 
1288 void __i915_vma_evict(struct i915_vma *vma)
1289 {
1290 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1291 
1292 	if (i915_vma_is_map_and_fenceable(vma)) {
1293 		/* Force a pagefault for domain tracking on next user access */
1294 		i915_vma_revoke_mmap(vma);
1295 
1296 		/*
1297 		 * Check that we have flushed all writes through the GGTT
1298 		 * before the unbind; otherwise, due to the non-strict nature of
1299 		 * those indirect writes, they may end up referencing the GGTT PTE
1300 		 * after the unbind.
1301 		 *
1302 		 * Note that we may be concurrently poking at the GGTT_WRITE
1303 		 * bit from set-domain, as we mark all GGTT vma associated
1304 		 * with an object. We know this is for another vma, as we
1305 		 * are currently unbinding this one -- so if this vma will be
1306 		 * reused, it will be refaulted and have its dirty bit set
1307 		 * before the next write.
1308 		 */
1309 		i915_vma_flush_writes(vma);
1310 
1311 		/* release the fence reg _after_ flushing */
1312 		i915_vma_revoke_fence(vma);
1313 
1314 		__i915_vma_iounmap(vma);
1315 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1316 	}
1317 	GEM_BUG_ON(vma->fence);
1318 	GEM_BUG_ON(i915_vma_has_userfault(vma));
1319 
1320 	if (likely(atomic_read(&vma->vm->open))) {
1321 		trace_i915_vma_unbind(vma);
1322 		vma->ops->unbind_vma(vma->vm, vma);
1323 	}
1324 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1325 		   &vma->flags);
1326 
1327 	i915_vma_detach(vma);
1328 	vma_unbind_pages(vma);
1329 }
1330 
1331 int __i915_vma_unbind(struct i915_vma *vma)
1332 {
1333 	int ret;
1334 
1335 	lockdep_assert_held(&vma->vm->mutex);
1336 
1337 	if (!drm_mm_node_allocated(&vma->node))
1338 		return 0;
1339 
1340 	if (i915_vma_is_pinned(vma)) {
1341 		vma_print_allocator(vma, "is pinned");
1342 		return -EAGAIN;
1343 	}
1344 
1345 	/*
1346 	 * After confirming that no one else is pinning this vma, wait for
1347 	 * any laggards who may have crept in during the wait (through
1348 	 * a residual pin skipping the vm->mutex) to complete.
1349 	 */
1350 	ret = i915_vma_sync(vma);
1351 	if (ret)
1352 		return ret;
1353 
1354 	GEM_BUG_ON(i915_vma_is_active(vma));
1355 	__i915_vma_evict(vma);
1356 
1357 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1358 	return 0;
1359 }
1360 
1361 int i915_vma_unbind(struct i915_vma *vma)
1362 {
1363 	struct i915_address_space *vm = vma->vm;
1364 	intel_wakeref_t wakeref = 0;
1365 	int err;
1366 
1367 	/* Optimistic wait before taking the mutex */
1368 	err = i915_vma_sync(vma);
1369 	if (err)
1370 		return err;
1371 
1372 	if (!drm_mm_node_allocated(&vma->node))
1373 		return 0;
1374 
1375 	if (i915_vma_is_pinned(vma)) {
1376 		vma_print_allocator(vma, "is pinned");
1377 		return -EAGAIN;
1378 	}
1379 
1380 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1381 		/* XXX not always required: nop_clear_range */
1382 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1383 
1384 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1385 	if (err)
1386 		goto out_rpm;
1387 
1388 	err = __i915_vma_unbind(vma);
1389 	mutex_unlock(&vm->mutex);
1390 
1391 out_rpm:
1392 	if (wakeref)
1393 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1394 	return err;
1395 }
1396 
1397 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1398 {
1399 	i915_gem_object_make_unshrinkable(vma->obj);
1400 	return vma;
1401 }
1402 
1403 void i915_vma_make_shrinkable(struct i915_vma *vma)
1404 {
1405 	i915_gem_object_make_shrinkable(vma->obj);
1406 }
1407 
1408 void i915_vma_make_purgeable(struct i915_vma *vma)
1409 {
1410 	i915_gem_object_make_purgeable(vma->obj);
1411 }
1412 
1413 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1414 #include "selftests/i915_vma.c"
1415 #endif
1416 
1417 void i915_vma_module_exit(void)
1418 {
1419 	kmem_cache_destroy(slab_vmas);
1420 }
1421 
1422 int __init i915_vma_module_init(void)
1423 {
1424 	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1425 	if (!slab_vmas)
1426 		return -ENOMEM;
1427 
1428 	return 0;
1429 }
1430