1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/sched/mm.h>
26 #include <drm/drm_gem.h>
27 
28 #include "display/intel_frontbuffer.h"
29 
30 #include "gt/intel_engine.h"
31 #include "gt/intel_engine_heartbeat.h"
32 #include "gt/intel_gt.h"
33 #include "gt/intel_gt_requests.h"
34 
35 #include "i915_drv.h"
36 #include "i915_globals.h"
37 #include "i915_sw_fence_work.h"
38 #include "i915_trace.h"
39 #include "i915_vma.h"
40 
41 static struct i915_global_vma {
42 	struct i915_global base;
43 	struct kmem_cache *slab_vmas;
44 } global;
45 
46 struct i915_vma *i915_vma_alloc(void)
47 {
48 	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
49 }
50 
51 void i915_vma_free(struct i915_vma *vma)
52 {
53 	return kmem_cache_free(global.slab_vmas, vma);
54 }
55 
56 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
57 
58 #include <linux/stackdepot.h>
59 
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
61 {
62 	unsigned long *entries;
63 	unsigned int nr_entries;
64 	char buf[512];
65 
66 	if (!vma->node.stack) {
67 		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
68 				 vma->node.start, vma->node.size, reason);
69 		return;
70 	}
71 
72 	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
73 	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
74 	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
75 			 vma->node.start, vma->node.size, reason, buf);
76 }
77 
78 #else
79 
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
81 {
82 }
83 
84 #endif
85 
86 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
87 {
88 	return container_of(ref, typeof(struct i915_vma), active);
89 }
90 
91 static int __i915_vma_active(struct i915_active *ref)
92 {
93 	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
94 }
95 
96 __i915_active_call
97 static void __i915_vma_retire(struct i915_active *ref)
98 {
99 	i915_vma_put(active_to_vma(ref));
100 }
101 
102 static struct i915_vma *
103 vma_create(struct drm_i915_gem_object *obj,
104 	   struct i915_address_space *vm,
105 	   const struct i915_ggtt_view *view)
106 {
107 	struct i915_vma *pos = ERR_PTR(-E2BIG);
108 	struct i915_vma *vma;
109 	struct rb_node *rb, **p;
110 
111 	/* The aliasing_ppgtt should never be used directly! */
112 	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
113 
114 	vma = i915_vma_alloc();
115 	if (vma == NULL)
116 		return ERR_PTR(-ENOMEM);
117 
118 	kref_init(&vma->ref);
119 	mutex_init(&vma->pages_mutex);
120 	vma->vm = i915_vm_get(vm);
121 	vma->ops = &vm->vma_ops;
122 	vma->obj = obj;
123 	vma->resv = obj->base.resv;
124 	vma->size = obj->base.size;
125 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
126 
127 	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
128 
129 	/* Declare ourselves safe for use inside shrinkers */
130 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
131 		fs_reclaim_acquire(GFP_KERNEL);
132 		might_lock(&vma->active.mutex);
133 		fs_reclaim_release(GFP_KERNEL);
134 	}
135 
136 	INIT_LIST_HEAD(&vma->closed_link);
137 
138 	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
139 		vma->ggtt_view = *view;
140 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
141 			GEM_BUG_ON(range_overflows_t(u64,
142 						     view->partial.offset,
143 						     view->partial.size,
144 						     obj->base.size >> PAGE_SHIFT));
145 			vma->size = view->partial.size;
146 			vma->size <<= PAGE_SHIFT;
147 			GEM_BUG_ON(vma->size > obj->base.size);
148 		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
149 			vma->size = intel_rotation_info_size(&view->rotated);
150 			vma->size <<= PAGE_SHIFT;
151 		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
152 			vma->size = intel_remapped_info_size(&view->remapped);
153 			vma->size <<= PAGE_SHIFT;
154 		}
155 	}
156 
157 	if (unlikely(vma->size > vm->total))
158 		goto err_vma;
159 
160 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
161 
162 	spin_lock(&obj->vma.lock);
163 
164 	if (i915_is_ggtt(vm)) {
165 		if (unlikely(overflows_type(vma->size, u32)))
166 			goto err_unlock;
167 
168 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
169 						      i915_gem_object_get_tiling(obj),
170 						      i915_gem_object_get_stride(obj));
171 		if (unlikely(vma->fence_size < vma->size || /* overflow */
172 			     vma->fence_size > vm->total))
173 			goto err_unlock;
174 
175 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
176 
177 		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
178 								i915_gem_object_get_tiling(obj),
179 								i915_gem_object_get_stride(obj));
180 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
181 
182 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
183 	}
184 
185 	rb = NULL;
186 	p = &obj->vma.tree.rb_node;
187 	while (*p) {
188 		long cmp;
189 
190 		rb = *p;
191 		pos = rb_entry(rb, struct i915_vma, obj_node);
192 
193 		/*
194 		 * If the view already exists in the tree, another thread
195 		 * already created a matching vma, so return the older instance
196 		 * and dispose of ours.
197 		 */
198 		cmp = i915_vma_compare(pos, vm, view);
199 		if (cmp < 0)
200 			p = &rb->rb_right;
201 		else if (cmp > 0)
202 			p = &rb->rb_left;
203 		else
204 			goto err_unlock;
205 	}
206 	rb_link_node(&vma->obj_node, rb, p);
207 	rb_insert_color(&vma->obj_node, &obj->vma.tree);
208 
209 	if (i915_vma_is_ggtt(vma))
210 		/*
211 		 * We put the GGTT vma at the start of the vma-list, followed
212 		 * by the ppGTT vma. This allows us to break early when
213 		 * iterating over only the GGTT vma for an object, see
214 		 * for_each_ggtt_vma()
215 		 */
216 		list_add(&vma->obj_link, &obj->vma.list);
217 	else
218 		list_add_tail(&vma->obj_link, &obj->vma.list);
219 
220 	spin_unlock(&obj->vma.lock);
221 
222 	return vma;
223 
224 err_unlock:
225 	spin_unlock(&obj->vma.lock);
226 err_vma:
227 	i915_vm_put(vm);
228 	i915_vma_free(vma);
229 	return pos;
230 }
231 
232 static struct i915_vma *
233 vma_lookup(struct drm_i915_gem_object *obj,
234 	   struct i915_address_space *vm,
235 	   const struct i915_ggtt_view *view)
236 {
237 	struct rb_node *rb;
238 
239 	rb = obj->vma.tree.rb_node;
240 	while (rb) {
241 		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
242 		long cmp;
243 
244 		cmp = i915_vma_compare(vma, vm, view);
245 		if (cmp == 0)
246 			return vma;
247 
248 		if (cmp < 0)
249 			rb = rb->rb_right;
250 		else
251 			rb = rb->rb_left;
252 	}
253 
254 	return NULL;
255 }
256 
257 /**
258  * i915_vma_instance - return the singleton instance of the VMA
259  * @obj: parent &struct drm_i915_gem_object to be mapped
260  * @vm: address space in which the mapping is located
261  * @view: additional mapping requirements
262  *
263  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
264  * the same @view characteristics. If a match is not found, one is created.
265  * Once created, the VMA is kept until either the object is freed, or the
266  * address space is closed.
267  *
268  * Returns the vma, or an error pointer.
269  */
270 struct i915_vma *
271 i915_vma_instance(struct drm_i915_gem_object *obj,
272 		  struct i915_address_space *vm,
273 		  const struct i915_ggtt_view *view)
274 {
275 	struct i915_vma *vma;
276 
277 	GEM_BUG_ON(view && !i915_is_ggtt(vm));
278 	GEM_BUG_ON(!atomic_read(&vm->open));
279 
280 	spin_lock(&obj->vma.lock);
281 	vma = vma_lookup(obj, vm, view);
282 	spin_unlock(&obj->vma.lock);
283 
284 	/* vma_create() will resolve the race if another creates the vma */
285 	if (unlikely(!vma))
286 		vma = vma_create(obj, vm, view);
287 
288 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
289 	return vma;
290 }
291 
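/*
 * Illustrative usage sketch (not part of the original file; caller context
 * such as "obj", "ggtt" and "err" is assumed): callers typically look up the
 * singleton vma and then pin it into the address space, e.g.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */
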
292 struct i915_vma_work {
293 	struct dma_fence_work base;
294 	struct i915_address_space *vm;
295 	struct i915_vm_pt_stash stash;
296 	struct i915_vma *vma;
297 	struct drm_i915_gem_object *pinned;
298 	struct i915_sw_dma_fence_cb cb;
299 	enum i915_cache_level cache_level;
300 	unsigned int flags;
301 };
302 
303 static int __vma_bind(struct dma_fence_work *work)
304 {
305 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
306 	struct i915_vma *vma = vw->vma;
307 
308 	vma->ops->bind_vma(vw->vm, &vw->stash,
309 			   vma, vw->cache_level, vw->flags);
310 	return 0;
311 }
312 
313 static void __vma_release(struct dma_fence_work *work)
314 {
315 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
316 
317 	if (vw->pinned) {
318 		__i915_gem_object_unpin_pages(vw->pinned);
319 		i915_gem_object_put(vw->pinned);
320 	}
321 
322 	i915_vm_free_pt_stash(vw->vm, &vw->stash);
323 	i915_vm_put(vw->vm);
324 }
325 
326 static const struct dma_fence_work_ops bind_ops = {
327 	.name = "bind",
328 	.work = __vma_bind,
329 	.release = __vma_release,
330 };
331 
332 struct i915_vma_work *i915_vma_work(void)
333 {
334 	struct i915_vma_work *vw;
335 
336 	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
337 	if (!vw)
338 		return NULL;
339 
340 	dma_fence_work_init(&vw->base, &bind_ops);
341 	vw->base.dma.error = -EAGAIN; /* disable the worker by default */
342 
343 	return vw;
344 }
345 
346 int i915_vma_wait_for_bind(struct i915_vma *vma)
347 {
348 	int err = 0;
349 
350 	if (rcu_access_pointer(vma->active.excl.fence)) {
351 		struct dma_fence *fence;
352 
353 		rcu_read_lock();
354 		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
355 		rcu_read_unlock();
356 		if (fence) {
357 			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
358 			dma_fence_put(fence);
359 		}
360 	}
361 
362 	return err;
363 }
364 
365 /**
366  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
367  * @vma: VMA to map
368  * @cache_level: mapping cache level
369  * @flags: flags like global or local mapping
370  * @work: preallocated worker for allocating and binding the PTE
371  *
372  * DMA addresses are taken from the scatter-gather table of this object (or of
373  * this VMA in case of non-default GGTT views) and PTE entries set up.
374  * Note that DMA addresses are also the only part of the SG table we care about.
375  */
376 int i915_vma_bind(struct i915_vma *vma,
377 		  enum i915_cache_level cache_level,
378 		  u32 flags,
379 		  struct i915_vma_work *work)
380 {
381 	u32 bind_flags;
382 	u32 vma_flags;
383 
384 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
385 	GEM_BUG_ON(vma->size > vma->node.size);
386 
387 	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
388 					      vma->node.size,
389 					      vma->vm->total)))
390 		return -ENODEV;
391 
392 	if (GEM_DEBUG_WARN_ON(!flags))
393 		return -EINVAL;
394 
395 	bind_flags = flags;
396 	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
397 
398 	vma_flags = atomic_read(&vma->flags);
399 	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
400 
401 	bind_flags &= ~vma_flags;
402 	if (bind_flags == 0)
403 		return 0;
404 
405 	GEM_BUG_ON(!vma->pages);
406 
407 	trace_i915_vma_bind(vma, bind_flags);
408 	if (work && bind_flags & vma->vm->bind_async_flags) {
409 		struct dma_fence *prev;
410 
411 		work->vma = vma;
412 		work->cache_level = cache_level;
413 		work->flags = bind_flags;
414 
415 		/*
416 		 * Note we only want to chain up to the migration fence on
417 		 * the pages (not the object itself). As we don't track that,
418 		 * yet, we have to use the exclusive fence instead.
419 		 *
420 		 * Also note that we do not want to track the async vma as
421 		 * part of the obj->resv->excl_fence as it only affects
423 		 * execution and not content or the object's backing-store lifetime.
423 		 */
424 		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
425 		if (prev) {
426 			__i915_sw_fence_await_dma_fence(&work->base.chain,
427 							prev,
428 							&work->cb);
429 			dma_fence_put(prev);
430 		}
431 
432 		work->base.dma.error = 0; /* enable the queue_work() */
433 
434 		if (vma->obj) {
435 			__i915_gem_object_pin_pages(vma->obj);
436 			work->pinned = i915_gem_object_get(vma->obj);
437 		}
438 	} else {
439 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
440 	}
441 
442 	if (vma->obj)
443 		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
444 
445 	atomic_or(bind_flags, &vma->flags);
446 	return 0;
447 }
448 
449 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
450 {
451 	void __iomem *ptr;
452 	int err;
453 
454 	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
455 		err = -ENODEV;
456 		goto err;
457 	}
458 
459 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
460 	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
461 
462 	ptr = READ_ONCE(vma->iomap);
463 	if (ptr == NULL) {
464 		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
465 					vma->node.start,
466 					vma->node.size);
467 		if (ptr == NULL) {
468 			err = -ENOMEM;
469 			goto err;
470 		}
471 
472 		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
473 			io_mapping_unmap(ptr);
474 			ptr = vma->iomap;
475 		}
476 	}
477 
478 	__i915_vma_pin(vma);
479 
480 	err = i915_vma_pin_fence(vma);
481 	if (err)
482 		goto err_unpin;
483 
484 	i915_vma_set_ggtt_write(vma);
485 
486 	/* NB Access through the GTT requires the device to be awake. */
487 	return ptr;
488 
489 err_unpin:
490 	__i915_vma_unpin(vma);
491 err:
492 	return IO_ERR_PTR(err);
493 }
494 
495 void i915_vma_flush_writes(struct i915_vma *vma)
496 {
497 	if (i915_vma_unset_ggtt_write(vma))
498 		intel_gt_flush_ggtt_writes(vma->vm->gt);
499 }
500 
501 void i915_vma_unpin_iomap(struct i915_vma *vma)
502 {
503 	GEM_BUG_ON(vma->iomap == NULL);
504 
505 	i915_vma_flush_writes(vma);
506 
507 	i915_vma_unpin_fence(vma);
508 	i915_vma_unpin(vma);
509 }
510 
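/*
 * Illustrative usage sketch (not part of the original file; "vma", "offset"
 * and "value" are assumed): i915_vma_pin_iomap() and i915_vma_unpin_iomap()
 * are used as a pair for short CPU writes through the mappable GGTT
 * aperture, e.g.
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 */
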
511 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
512 {
513 	struct i915_vma *vma;
514 	struct drm_i915_gem_object *obj;
515 
516 	vma = fetch_and_zero(p_vma);
517 	if (!vma)
518 		return;
519 
520 	obj = vma->obj;
521 	GEM_BUG_ON(!obj);
522 
523 	i915_vma_unpin(vma);
524 
525 	if (flags & I915_VMA_RELEASE_MAP)
526 		i915_gem_object_unpin_map(obj);
527 
528 	i915_gem_object_put(obj);
529 }
530 
531 bool i915_vma_misplaced(const struct i915_vma *vma,
532 			u64 size, u64 alignment, u64 flags)
533 {
534 	if (!drm_mm_node_allocated(&vma->node))
535 		return false;
536 
537 	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
538 		return true;
539 
540 	if (vma->node.size < size)
541 		return true;
542 
543 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
544 	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
545 		return true;
546 
547 	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
548 		return true;
549 
550 	if (flags & PIN_OFFSET_BIAS &&
551 	    vma->node.start < (flags & PIN_OFFSET_MASK))
552 		return true;
553 
554 	if (flags & PIN_OFFSET_FIXED &&
555 	    vma->node.start != (flags & PIN_OFFSET_MASK))
556 		return true;
557 
558 	return false;
559 }
560 
561 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
562 {
563 	bool mappable, fenceable;
564 
565 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
566 	GEM_BUG_ON(!vma->fence_size);
567 
568 	fenceable = (vma->node.size >= vma->fence_size &&
569 		     IS_ALIGNED(vma->node.start, vma->fence_alignment));
570 
571 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
572 
573 	if (mappable && fenceable)
574 		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
575 	else
576 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
577 }
578 
579 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
580 {
581 	struct drm_mm_node *node = &vma->node;
582 	struct drm_mm_node *other;
583 
584 	/*
585 	 * On some machines we have to be careful when putting differing types
586 	 * of snoopable memory together to avoid the prefetcher crossing memory
587 	 * domains and dying. During vm initialisation, we decide whether or not
588 	 * these constraints apply and set the drm_mm.color_adjust
589 	 * appropriately.
590 	 */
591 	if (!i915_vm_has_cache_coloring(vma->vm))
592 		return true;
593 
594 	/* Only valid to be called on an already inserted vma */
595 	GEM_BUG_ON(!drm_mm_node_allocated(node));
596 	GEM_BUG_ON(list_empty(&node->node_list));
597 
598 	other = list_prev_entry(node, node_list);
599 	if (i915_node_color_differs(other, color) &&
600 	    !drm_mm_hole_follows(other))
601 		return false;
602 
603 	other = list_next_entry(node, node_list);
604 	if (i915_node_color_differs(other, color) &&
605 	    !drm_mm_hole_follows(node))
606 		return false;
607 
608 	return true;
609 }
610 
611 /**
612  * i915_vma_insert - finds a slot for the vma in its address space
613  * @vma: the vma
614  * @size: requested size in bytes (can be larger than the VMA)
615  * @alignment: required alignment
616  * @flags: mask of PIN_* flags to use
617  *
618  * First we try to allocate some free space that meets the requirements for
619  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
620  * preferably the oldest idle entry, to make room for the new VMA.
621  *
622  * Returns:
623  * 0 on success, negative error code otherwise.
624  */
625 static int
626 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
627 {
628 	unsigned long color;
629 	u64 start, end;
630 	int ret;
631 
632 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
633 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
634 
635 	size = max(size, vma->size);
636 	alignment = max(alignment, vma->display_alignment);
637 	if (flags & PIN_MAPPABLE) {
638 		size = max_t(typeof(size), size, vma->fence_size);
639 		alignment = max_t(typeof(alignment),
640 				  alignment, vma->fence_alignment);
641 	}
642 
643 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
644 	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
645 	GEM_BUG_ON(!is_power_of_2(alignment));
646 
647 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
648 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
649 
650 	end = vma->vm->total;
651 	if (flags & PIN_MAPPABLE)
652 		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
653 	if (flags & PIN_ZONE_4G)
654 		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
655 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
656 
657 	/* If binding the object/GGTT view requires more space than the entire
658 	 * aperture has, reject it early before evicting everything in a vain
659 	 * attempt to find space.
660 	 */
661 	if (size > end) {
662 		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
663 			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
664 			  end);
665 		return -ENOSPC;
666 	}
667 
668 	color = 0;
669 	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
670 		color = vma->obj->cache_level;
671 
672 	if (flags & PIN_OFFSET_FIXED) {
673 		u64 offset = flags & PIN_OFFSET_MASK;
674 		if (!IS_ALIGNED(offset, alignment) ||
675 		    range_overflows(offset, size, end))
676 			return -EINVAL;
677 
678 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
679 					   size, offset, color,
680 					   flags);
681 		if (ret)
682 			return ret;
683 	} else {
684 		/*
685 		 * We only support huge gtt pages through the 48b PPGTT,
686 		 * however we also don't want to force any alignment for
687 		 * objects which need to be tightly packed into the low 32bits.
688 		 *
689 		 * Note that we assume that the GGTT is limited to 4GiB for the
690 		 * foreseeable future. See also i915_ggtt_offset().
691 		 */
692 		if (upper_32_bits(end - 1) &&
693 		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
694 			/*
695 			 * We can't mix 64K and 4K PTEs in the same page-table
696 			 * (2M block), and so to avoid the ugliness and
697 			 * complexity of coloring we opt for just aligning 64K
698 			 * objects to 2M.
699 			 */
700 			u64 page_alignment =
701 				rounddown_pow_of_two(vma->page_sizes.sg |
702 						     I915_GTT_PAGE_SIZE_2M);
703 
704 			/*
705 			 * Check we don't expand for the limited Global GTT
706 			 * (mappable aperture is even more precious!). This
707 			 * also checks that we exclude the aliasing-ppgtt.
708 			 */
709 			GEM_BUG_ON(i915_vma_is_ggtt(vma));
710 
711 			alignment = max(alignment, page_alignment);
712 
713 			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
714 				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
715 		}
716 
717 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
718 					  size, alignment, color,
719 					  start, end, flags);
720 		if (ret)
721 			return ret;
722 
723 		GEM_BUG_ON(vma->node.start < start);
724 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
725 	}
726 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
727 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
728 
729 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
730 
731 	return 0;
732 }
733 
734 static void
735 i915_vma_detach(struct i915_vma *vma)
736 {
737 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
738 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
739 
740 	/*
741 	 * And finally now the object is completely decoupled from this
742 	 * vma, we can drop its hold on the backing storage and allow
743 	 * it to be reaped by the shrinker.
744 	 */
745 	list_del(&vma->vm_link);
746 }
747 
748 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
749 {
750 	unsigned int bound;
751 	bool pinned = true;
752 
753 	bound = atomic_read(&vma->flags);
754 	do {
755 		if (unlikely(flags & ~bound))
756 			return false;
757 
758 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
759 			return false;
760 
761 		if (!(bound & I915_VMA_PIN_MASK))
762 			goto unpinned;
763 
764 		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
765 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
766 
767 	return true;
768 
769 unpinned:
770 	/*
771 	 * If pin_count==0, but we are bound, check under the lock to avoid
772 	 * racing with a concurrent i915_vma_unbind().
773 	 */
774 	mutex_lock(&vma->vm->mutex);
775 	do {
776 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
777 			pinned = false;
778 			break;
779 		}
780 
781 		if (unlikely(flags & ~bound)) {
782 			pinned = false;
783 			break;
784 		}
785 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
786 	mutex_unlock(&vma->vm->mutex);
787 
788 	return pinned;
789 }
790 
791 static int vma_get_pages(struct i915_vma *vma)
792 {
793 	int err = 0;
794 
795 	if (atomic_add_unless(&vma->pages_count, 1, 0))
796 		return 0;
797 
798 	/* Allocations ahoy! */
799 	if (mutex_lock_interruptible(&vma->pages_mutex))
800 		return -EINTR;
801 
802 	if (!atomic_read(&vma->pages_count)) {
803 		if (vma->obj) {
804 			err = i915_gem_object_pin_pages(vma->obj);
805 			if (err)
806 				goto unlock;
807 		}
808 
809 		err = vma->ops->set_pages(vma);
810 		if (err) {
811 			if (vma->obj)
812 				i915_gem_object_unpin_pages(vma->obj);
813 			goto unlock;
814 		}
815 	}
816 	atomic_inc(&vma->pages_count);
817 
818 unlock:
819 	mutex_unlock(&vma->pages_mutex);
820 
821 	return err;
822 }
823 
824 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
825 {
826 	/* We allocate under vma_get_pages, so beware the shrinker */
827 	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
828 	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
829 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
830 		vma->ops->clear_pages(vma);
831 		GEM_BUG_ON(vma->pages);
832 		if (vma->obj)
833 			i915_gem_object_unpin_pages(vma->obj);
834 	}
835 	mutex_unlock(&vma->pages_mutex);
836 }
837 
838 static void vma_put_pages(struct i915_vma *vma)
839 {
840 	if (atomic_add_unless(&vma->pages_count, -1, 1))
841 		return;
842 
843 	__vma_put_pages(vma, 1);
844 }
845 
846 static void vma_unbind_pages(struct i915_vma *vma)
847 {
848 	unsigned int count;
849 
850 	lockdep_assert_held(&vma->vm->mutex);
851 
852 	/* The upper portion of pages_count is the number of bindings */
853 	count = atomic_read(&vma->pages_count);
854 	count >>= I915_VMA_PAGES_BIAS;
855 	GEM_BUG_ON(!count);
856 
857 	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
858 }
859 
860 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
861 		    u64 size, u64 alignment, u64 flags)
862 {
863 	struct i915_vma_work *work = NULL;
864 	intel_wakeref_t wakeref = 0;
865 	unsigned int bound;
866 	int err;
867 
868 #ifdef CONFIG_PROVE_LOCKING
869 	if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
870 		WARN_ON(!ww);
871 #endif
872 
873 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
874 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
875 
876 	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
877 
878 	/* First try and grab the pin without rebinding the vma */
879 	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
880 		return 0;
881 
882 	err = vma_get_pages(vma);
883 	if (err)
884 		return err;
885 
886 	if (flags & PIN_GLOBAL)
887 		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
888 
889 	if (flags & vma->vm->bind_async_flags) {
890 		work = i915_vma_work();
891 		if (!work) {
892 			err = -ENOMEM;
893 			goto err_rpm;
894 		}
895 
896 		work->vm = i915_vm_get(vma->vm);
897 
898 		/* Allocate enough page directories to cover the PTEs we will use */
899 		if (vma->vm->allocate_va_range) {
900 			err = i915_vm_alloc_pt_stash(vma->vm,
901 						     &work->stash,
902 						     vma->size);
903 			if (err)
904 				goto err_fence;
905 
906 			err = i915_vm_pin_pt_stash(vma->vm,
907 						   &work->stash);
908 			if (err)
909 				goto err_fence;
910 		}
911 	}
912 
913 	/*
914 	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
915 	 *
916 	 * We conflate the Global GTT with the user's vma when using the
917 	 * aliasing-ppgtt, but it is still vitally important to try and
918 	 * keep the use cases distinct. For example, userptr objects are
919 	 * not allowed inside the Global GTT as that will cause lock
920 	 * inversions when we have to evict them via the mmu_notifier callbacks -
921 	 * but they are allowed to be part of the user ppGTT which can never
922 	 * be mapped. As such we try to give the distinct users of the same
923 	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
924 	 * and i915_ppgtt separate].
925 	 *
926 	 * NB this may cause us to mask real lock inversions -- while the
927 	 * code is safe today, lockdep may not be able to spot future
928 	 * transgressions.
929 	 */
930 	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
931 					      !(flags & PIN_GLOBAL));
932 	if (err)
933 		goto err_fence;
934 
935 	/* No more allocations allowed now that we hold vm->mutex */
936 
937 	if (unlikely(i915_vma_is_closed(vma))) {
938 		err = -ENOENT;
939 		goto err_unlock;
940 	}
941 
942 	bound = atomic_read(&vma->flags);
943 	if (unlikely(bound & I915_VMA_ERROR)) {
944 		err = -ENOMEM;
945 		goto err_unlock;
946 	}
947 
948 	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
949 		err = -EAGAIN; /* pins are meant to be fairly temporary */
950 		goto err_unlock;
951 	}
952 
953 	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
954 		__i915_vma_pin(vma);
955 		goto err_unlock;
956 	}
957 
958 	err = i915_active_acquire(&vma->active);
959 	if (err)
960 		goto err_unlock;
961 
962 	if (!(bound & I915_VMA_BIND_MASK)) {
963 		err = i915_vma_insert(vma, size, alignment, flags);
964 		if (err)
965 			goto err_active;
966 
967 		if (i915_is_ggtt(vma->vm))
968 			__i915_vma_set_map_and_fenceable(vma);
969 	}
970 
971 	GEM_BUG_ON(!vma->pages);
972 	err = i915_vma_bind(vma,
973 			    vma->obj ? vma->obj->cache_level : 0,
974 			    flags, work);
975 	if (err)
976 		goto err_remove;
977 
978 	/* There should only be at most 2 active bindings (user, global) */
979 	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
980 	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
981 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
982 
983 	__i915_vma_pin(vma);
984 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
985 	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
986 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
987 
988 err_remove:
989 	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
990 		i915_vma_detach(vma);
991 		drm_mm_remove_node(&vma->node);
992 	}
993 err_active:
994 	i915_active_release(&vma->active);
995 err_unlock:
996 	mutex_unlock(&vma->vm->mutex);
997 err_fence:
998 	if (work)
999 		dma_fence_work_commit_imm(&work->base);
1000 err_rpm:
1001 	if (wakeref)
1002 		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1003 	vma_put_pages(vma);
1004 	return err;
1005 }
1006 
1007 static void flush_idle_contexts(struct intel_gt *gt)
1008 {
1009 	struct intel_engine_cs *engine;
1010 	enum intel_engine_id id;
1011 
1012 	for_each_engine(engine, gt, id)
1013 		intel_engine_flush_barriers(engine);
1014 
1015 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1016 }
1017 
1018 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1019 		  u32 align, unsigned int flags)
1020 {
1021 	struct i915_address_space *vm = vma->vm;
1022 	int err;
1023 
1024 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1025 
1026 	do {
1027 		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1028 		if (err != -ENOSPC) {
1029 			if (!err) {
1030 				err = i915_vma_wait_for_bind(vma);
1031 				if (err)
1032 					i915_vma_unpin(vma);
1033 			}
1034 			return err;
1035 		}
1036 
1037 		/* Unlike i915_vma_pin, we don't take no for an answer! */
1038 		flush_idle_contexts(vm->gt);
1039 		if (mutex_lock_interruptible(&vm->mutex) == 0) {
1040 			i915_gem_evict_vm(vm);
1041 			mutex_unlock(&vm->mutex);
1042 		}
1043 	} while (1);
1044 }
1045 
1046 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1047 {
1048 	/*
1049 	 * We defer actually closing, unbinding and destroying the VMA until
1050 	 * the next idle point, or if the object is freed in the meantime. By
1051 	 * postponing the unbind, we allow for it to be resurrected by the
1052 	 * client, avoiding the work required to rebind the VMA. This is
1053 	 * advantageous for DRI, where the client/server pass objects
1054 	 * between themselves, temporarily opening a local VMA to the
1055 	 * object, and then closing it again. The same object is then reused
1056 	 * on the next frame (or two, depending on the depth of the swap queue)
1057 	 * causing us to rebind the VMA once more. This ends up being a lot
1058 	 * of wasted work for the steady state.
1059 	 */
1060 	GEM_BUG_ON(i915_vma_is_closed(vma));
1061 	list_add(&vma->closed_link, &gt->closed_vma);
1062 }
1063 
1064 void i915_vma_close(struct i915_vma *vma)
1065 {
1066 	struct intel_gt *gt = vma->vm->gt;
1067 	unsigned long flags;
1068 
1069 	if (i915_vma_is_ggtt(vma))
1070 		return;
1071 
1072 	GEM_BUG_ON(!atomic_read(&vma->open_count));
1073 	if (atomic_dec_and_lock_irqsave(&vma->open_count,
1074 					&gt->closed_lock,
1075 					flags)) {
1076 		__vma_close(vma, gt);
1077 		spin_unlock_irqrestore(&gt->closed_lock, flags);
1078 	}
1079 }
1080 
1081 static void __i915_vma_remove_closed(struct i915_vma *vma)
1082 {
1083 	struct intel_gt *gt = vma->vm->gt;
1084 
1085 	spin_lock_irq(&gt->closed_lock);
1086 	list_del_init(&vma->closed_link);
1087 	spin_unlock_irq(&gt->closed_lock);
1088 }
1089 
1090 void i915_vma_reopen(struct i915_vma *vma)
1091 {
1092 	if (i915_vma_is_closed(vma))
1093 		__i915_vma_remove_closed(vma);
1094 }
1095 
1096 void i915_vma_release(struct kref *ref)
1097 {
1098 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1099 
1100 	if (drm_mm_node_allocated(&vma->node)) {
1101 		mutex_lock(&vma->vm->mutex);
1102 		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1103 		WARN_ON(__i915_vma_unbind(vma));
1104 		mutex_unlock(&vma->vm->mutex);
1105 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1106 	}
1107 	GEM_BUG_ON(i915_vma_is_active(vma));
1108 
1109 	if (vma->obj) {
1110 		struct drm_i915_gem_object *obj = vma->obj;
1111 
1112 		spin_lock(&obj->vma.lock);
1113 		list_del(&vma->obj_link);
1114 		if (!RB_EMPTY_NODE(&vma->obj_node))
1115 			rb_erase(&vma->obj_node, &obj->vma.tree);
1116 		spin_unlock(&obj->vma.lock);
1117 	}
1118 
1119 	__i915_vma_remove_closed(vma);
1120 	i915_vm_put(vma->vm);
1121 
1122 	i915_active_fini(&vma->active);
1123 	i915_vma_free(vma);
1124 }
1125 
1126 void i915_vma_parked(struct intel_gt *gt)
1127 {
1128 	struct i915_vma *vma, *next;
1129 	LIST_HEAD(closed);
1130 
1131 	spin_lock_irq(&gt->closed_lock);
1132 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1133 		struct drm_i915_gem_object *obj = vma->obj;
1134 		struct i915_address_space *vm = vma->vm;
1135 
1136 		/* XXX All to avoid keeping a reference on i915_vma itself */
1137 
1138 		if (!kref_get_unless_zero(&obj->base.refcount))
1139 			continue;
1140 
1141 		if (!i915_vm_tryopen(vm)) {
1142 			i915_gem_object_put(obj);
1143 			continue;
1144 		}
1145 
1146 		list_move(&vma->closed_link, &closed);
1147 	}
1148 	spin_unlock_irq(&gt->closed_lock);
1149 
1150 	/* As the GT is held idle, no vma can be reopened as we destroy them */
1151 	list_for_each_entry_safe(vma, next, &closed, closed_link) {
1152 		struct drm_i915_gem_object *obj = vma->obj;
1153 		struct i915_address_space *vm = vma->vm;
1154 
1155 		INIT_LIST_HEAD(&vma->closed_link);
1156 		__i915_vma_put(vma);
1157 
1158 		i915_gem_object_put(obj);
1159 		i915_vm_close(vm);
1160 	}
1161 }
1162 
1163 static void __i915_vma_iounmap(struct i915_vma *vma)
1164 {
1165 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1166 
1167 	if (vma->iomap == NULL)
1168 		return;
1169 
1170 	io_mapping_unmap(vma->iomap);
1171 	vma->iomap = NULL;
1172 }
1173 
1174 void i915_vma_revoke_mmap(struct i915_vma *vma)
1175 {
1176 	struct drm_vma_offset_node *node;
1177 	u64 vma_offset;
1178 
1179 	if (!i915_vma_has_userfault(vma))
1180 		return;
1181 
1182 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1183 	GEM_BUG_ON(!vma->obj->userfault_count);
1184 
1185 	node = &vma->mmo->vma_node;
1186 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1187 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1188 			    drm_vma_node_offset_addr(node) + vma_offset,
1189 			    vma->size,
1190 			    1);
1191 
1192 	i915_vma_unset_userfault(vma);
1193 	if (!--vma->obj->userfault_count)
1194 		list_del(&vma->obj->userfault_link);
1195 }
1196 
1197 static int
1198 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1199 {
1200 	return __i915_request_await_exclusive(rq, &vma->active);
1201 }
1202 
1203 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1204 {
1205 	int err;
1206 
1207 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
1208 
1209 	/* Wait for the vma to be bound before we start! */
1210 	err = __i915_request_await_bind(rq, vma);
1211 	if (err)
1212 		return err;
1213 
1214 	return i915_active_add_request(&vma->active, rq);
1215 }
1216 
1217 int i915_vma_move_to_active(struct i915_vma *vma,
1218 			    struct i915_request *rq,
1219 			    unsigned int flags)
1220 {
1221 	struct drm_i915_gem_object *obj = vma->obj;
1222 	int err;
1223 
1224 	assert_object_held(obj);
1225 
1226 	err = __i915_vma_move_to_active(vma, rq);
1227 	if (unlikely(err))
1228 		return err;
1229 
1230 	if (flags & EXEC_OBJECT_WRITE) {
1231 		struct intel_frontbuffer *front;
1232 
1233 		front = __intel_frontbuffer_get(obj);
1234 		if (unlikely(front)) {
1235 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1236 				i915_active_add_request(&front->write, rq);
1237 			intel_frontbuffer_put(front);
1238 		}
1239 
1240 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
1241 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
1242 		obj->read_domains = 0;
1243 	} else {
1244 		err = dma_resv_reserve_shared(vma->resv, 1);
1245 		if (unlikely(err))
1246 			return err;
1247 
1248 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
1249 		obj->write_domain = 0;
1250 	}
1251 
1252 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1253 		i915_active_add_request(&vma->fence->active, rq);
1254 
1255 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
1256 	obj->mm.dirty = true;
1257 
1258 	GEM_BUG_ON(!i915_vma_is_active(vma));
1259 	return 0;
1260 }
1261 
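/*
 * Illustrative usage sketch (not part of the original file; "vma" and "rq"
 * are assumed): during request construction, a pinned vma whose object lock
 * is held is marked active before the commands that reference it are
 * emitted, e.g.
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 */
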
1262 void __i915_vma_evict(struct i915_vma *vma)
1263 {
1264 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1265 
1266 	if (i915_vma_is_map_and_fenceable(vma)) {
1267 		/* Force a pagefault for domain tracking on next user access */
1268 		i915_vma_revoke_mmap(vma);
1269 
1270 		/*
1271 		 * Check that we have flushed all writes through the GGTT
1272 		 * before the unbind; otherwise, due to the non-strict nature of
1273 		 * those indirect writes, they may end up referencing the GGTT PTE
1274 		 * after the unbind.
1275 		 *
1276 		 * Note that we may be concurrently poking at the GGTT_WRITE
1277 		 * bit from set-domain, as we mark all GGTT vma associated
1278 		 * with an object. We know this is for another vma, as we
1279 		 * are currently unbinding this one -- so if this vma will be
1280 		 * reused, it will be refaulted and have its dirty bit set
1281 		 * before the next write.
1282 		 */
1283 		i915_vma_flush_writes(vma);
1284 
1285 		/* release the fence reg _after_ flushing */
1286 		i915_vma_revoke_fence(vma);
1287 
1288 		__i915_vma_iounmap(vma);
1289 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1290 	}
1291 	GEM_BUG_ON(vma->fence);
1292 	GEM_BUG_ON(i915_vma_has_userfault(vma));
1293 
1294 	if (likely(atomic_read(&vma->vm->open))) {
1295 		trace_i915_vma_unbind(vma);
1296 		vma->ops->unbind_vma(vma->vm, vma);
1297 	}
1298 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1299 		   &vma->flags);
1300 
1301 	i915_vma_detach(vma);
1302 	vma_unbind_pages(vma);
1303 }
1304 
1305 int __i915_vma_unbind(struct i915_vma *vma)
1306 {
1307 	int ret;
1308 
1309 	lockdep_assert_held(&vma->vm->mutex);
1310 
1311 	if (!drm_mm_node_allocated(&vma->node))
1312 		return 0;
1313 
1314 	if (i915_vma_is_pinned(vma)) {
1315 		vma_print_allocator(vma, "is pinned");
1316 		return -EAGAIN;
1317 	}
1318 
1319 	/*
1320 	 * After confirming that no one else is pinning this vma, wait for
1321 	 * any laggards who may have crept in during the wait (through
1322 	 * a residual pin skipping the vm->mutex) to complete.
1323 	 */
1324 	ret = i915_vma_sync(vma);
1325 	if (ret)
1326 		return ret;
1327 
1328 	GEM_BUG_ON(i915_vma_is_active(vma));
1329 	__i915_vma_evict(vma);
1330 
1331 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1332 	return 0;
1333 }
1334 
1335 int i915_vma_unbind(struct i915_vma *vma)
1336 {
1337 	struct i915_address_space *vm = vma->vm;
1338 	intel_wakeref_t wakeref = 0;
1339 	int err;
1340 
1341 	/* Optimistic wait before taking the mutex */
1342 	err = i915_vma_sync(vma);
1343 	if (err)
1344 		return err;
1345 
1346 	if (!drm_mm_node_allocated(&vma->node))
1347 		return 0;
1348 
1349 	if (i915_vma_is_pinned(vma)) {
1350 		vma_print_allocator(vma, "is pinned");
1351 		return -EAGAIN;
1352 	}
1353 
1354 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1355 		/* XXX not always required: nop_clear_range */
1356 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1357 
1358 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1359 	if (err)
1360 		goto out_rpm;
1361 
1362 	err = __i915_vma_unbind(vma);
1363 	mutex_unlock(&vm->mutex);
1364 
1365 out_rpm:
1366 	if (wakeref)
1367 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1368 	return err;
1369 }
1370 
1371 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1372 {
1373 	i915_gem_object_make_unshrinkable(vma->obj);
1374 	return vma;
1375 }
1376 
1377 void i915_vma_make_shrinkable(struct i915_vma *vma)
1378 {
1379 	i915_gem_object_make_shrinkable(vma->obj);
1380 }
1381 
1382 void i915_vma_make_purgeable(struct i915_vma *vma)
1383 {
1384 	i915_gem_object_make_purgeable(vma->obj);
1385 }
1386 
1387 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1388 #include "selftests/i915_vma.c"
1389 #endif
1390 
1391 static void i915_global_vma_shrink(void)
1392 {
1393 	kmem_cache_shrink(global.slab_vmas);
1394 }
1395 
1396 static void i915_global_vma_exit(void)
1397 {
1398 	kmem_cache_destroy(global.slab_vmas);
1399 }
1400 
1401 static struct i915_global_vma global = { {
1402 	.shrink = i915_global_vma_shrink,
1403 	.exit = i915_global_vma_exit,
1404 } };
1405 
1406 int __init i915_global_vma_init(void)
1407 {
1408 	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1409 	if (!global.slab_vmas)
1410 		return -ENOMEM;
1411 
1412 	i915_global_register(&global.base);
1413 	return 0;
1414 }
1415