Lines matching refs: vma
221 struct i915_vma **vma; member
352 const struct i915_vma *vma, in eb_vma_misplaced() argument
355 if (vma->node.size < entry->pad_to_size) in eb_vma_misplaced()
358 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment)) in eb_vma_misplaced()
362 vma->node.start != entry->offset) in eb_vma_misplaced()
366 vma->node.start < BATCH_OFFSET_BIAS) in eb_vma_misplaced()
370 (vma->node.start + vma->node.size - 1) >> 32) in eb_vma_misplaced()
374 !i915_vma_is_map_and_fenceable(vma)) in eb_vma_misplaced()
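
Read together, the eb_vma_misplaced() fragments above form a single placement predicate over the vma's drm_mm node. A minimal reconstruction follows as a sketch; the flag names paired with each check (EXEC_OBJECT_PINNED, EXEC_OBJECT_SUPPORTS_48B_ADDRESS, __EXEC_OBJECT_NEEDS_BIAS, __EXEC_OBJECT_NEEDS_MAP) are assumed from the surrounding i915 source and are not visible in this listing:

static bool eb_vma_misplaced_sketch(const struct drm_i915_gem_exec_object2 *entry,
				    const struct i915_vma *vma,
				    unsigned int exec_flags)
{
	/* Node is smaller than the padding userspace asked for. */
	if (vma->node.size < entry->pad_to_size)
		return true;

	/* Node does not satisfy the requested alignment. */
	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	/* Userspace soft-pinned the object at a different offset. */
	if (exec_flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	/* The batch must sit above the bias on platforms that need it. */
	if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* Object crosses the 4GiB boundary without 48b address support. */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	/* Needs a mappable, fenceable GGTT binding but lacks one. */
	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}
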
383 struct i915_vma *vma) in eb_pin_vma() argument
385 unsigned int exec_flags = *vma->exec_flags; in eb_pin_vma()
388 if (vma->node.size) in eb_pin_vma()
389 pin_flags = vma->node.start; in eb_pin_vma()
397 if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) in eb_pin_vma()
401 if (unlikely(i915_vma_pin_fence(vma))) { in eb_pin_vma()
402 i915_vma_unpin(vma); in eb_pin_vma()
406 if (vma->fence) in eb_pin_vma()
410 *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; in eb_pin_vma()
411 return !eb_vma_misplaced(entry, vma, exec_flags); in eb_pin_vma()
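
eb_pin_vma() is the fast path: try to soft-pin the buffer exactly where it already sits and keep it only if that placement still satisfies the entry. A condensed sketch, with the PIN_*/EXEC_OBJECT_* flag handling assumed from the surrounding source (the NEEDS_GTT/PIN_GLOBAL case is omitted):

static bool eb_pin_vma_sketch(struct i915_execbuffer *eb,
			      const struct drm_i915_gem_exec_object2 *entry,
			      struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;	/* reuse the existing offset */
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);	/* drop the pin we just took */
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}
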
414 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags) in __eb_unreserve_vma() argument
419 __i915_vma_unpin_fence(vma); in __eb_unreserve_vma()
421 __i915_vma_unpin(vma); in __eb_unreserve_vma()
425 eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags) in eb_unreserve_vma() argument
430 __eb_unreserve_vma(vma, *flags); in eb_unreserve_vma()
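
The two unreserve helpers undo exactly what the pin paths recorded: drop the fence pin first, if one was taken, then the vma pin, then clear the bookkeeping bits. A short sketch; the __EXEC_OBJECT_* bit names are assumed from the surrounding source:

static void __eb_unreserve_vma_sketch(struct i915_vma *vma, unsigned int flags)
{
	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static void eb_unreserve_vma_sketch(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;	/* nothing was reserved for this slot */

	__eb_unreserve_vma_sketch(vma, *flags);
	*flags &= ~(__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE);
}
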
437 struct i915_vma *vma) in eb_validate_vma() argument
461 if (unlikely(vma->exec_flags)) { in eb_validate_vma()
479 i915_gem_object_is_tiled(vma->obj)) in eb_validate_vma()
492 struct i915_vma *vma) in eb_add_vma() argument
497 GEM_BUG_ON(i915_vma_is_closed(vma)); in eb_add_vma()
500 err = eb_validate_vma(eb, entry, vma); in eb_add_vma()
506 vma->exec_handle = entry->handle; in eb_add_vma()
507 hlist_add_head(&vma->exec_node, in eb_add_vma()
513 list_add_tail(&vma->reloc_link, &eb->relocs); in eb_add_vma()
521 eb->vma[i] = vma; in eb_add_vma()
523 vma->exec_flags = &eb->flags[i]; in eb_add_vma()
541 eb->batch = vma; in eb_add_vma()
545 if (eb_pin_vma(eb, entry, vma)) { in eb_add_vma()
546 if (entry->offset != vma->node.start) { in eb_add_vma()
547 entry->offset = vma->node.start | UPDATE; in eb_add_vma()
551 eb_unreserve_vma(vma, vma->exec_flags); in eb_add_vma()
553 list_add_tail(&vma->exec_link, &eb->unbound); in eb_add_vma()
554 if (drm_mm_node_allocated(&vma->node)) in eb_add_vma()
555 err = i915_vma_unbind(vma); in eb_add_vma()
557 vma->exec_flags = NULL; in eb_add_vma()
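
eb_add_vma() is where the per-slot bookkeeping is wired up: slot i ties together the userspace exec entry, the looked-up vma and a flags word, and the vma is either kept where eb_pin_vma() found it or queued on eb->unbound for eb_reserve(). A trimmed sketch of that tail; the flags initialisation and the relocation/batch special cases are assumed from the surrounding source and error unwinding is cut down:

	/* Slot i: userspace entry, vma and flags word are all cross-linked. */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	if (eb_pin_vma(eb, entry, vma)) {
		/* Kept in place; tell userspace if the offset moved. */
		if (entry->offset != vma->node.start)
			entry->offset = vma->node.start | UPDATE;
	} else {
		/* Misplaced or unbound: hand it over to eb_reserve(). */
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;	/* slot is dead, cleanup skips it */
	}
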
580 struct i915_vma *vma) in eb_reserve_vma() argument
582 struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); in eb_reserve_vma()
583 unsigned int exec_flags = *vma->exec_flags; in eb_reserve_vma()
608 err = i915_vma_pin(vma, in eb_reserve_vma()
614 if (entry->offset != vma->node.start) { in eb_reserve_vma()
615 entry->offset = vma->node.start | UPDATE; in eb_reserve_vma()
620 err = i915_vma_pin_fence(vma); in eb_reserve_vma()
622 i915_vma_unpin(vma); in eb_reserve_vma()
626 if (vma->fence) in eb_reserve_vma()
630 *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; in eb_reserve_vma()
631 GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags)); in eb_reserve_vma()
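
eb_reserve_vma() is the slow counterpart: translate the entry's constraints into PIN_* flags, bind the vma at a fresh location, and record the pin/fence state in the per-slot flags. A condensed sketch; the exact PIN_* derivation (PIN_ZONE_4G, PIN_MAPPABLE, PIN_OFFSET_FIXED, ...) is assumed from the surrounding source:

static int eb_reserve_vma_sketch(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags = PIN_USER | PIN_NONBLOCK;
	int err;

	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;		/* keep below 4GiB */
	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;		/* needs the aperture */
	if (exec_flags & EXEC_OBJECT_PINNED)
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	/* Report the (possibly new) offset back to userspace. */
	if (entry->offset != vma->node.start)
		entry->offset = vma->node.start | UPDATE;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}
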
640 struct i915_vma *vma; in eb_reserve() local
661 list_for_each_entry(vma, &eb->unbound, exec_link) { in eb_reserve()
662 err = eb_reserve_vma(eb, vma); in eb_reserve()
674 struct i915_vma *vma = eb->vma[i]; in eb_reserve() local
680 eb_unreserve_vma(vma, &eb->flags[i]); in eb_reserve()
684 list_add(&vma->exec_link, &eb->unbound); in eb_reserve()
687 list_add_tail(&vma->exec_link, &eb->unbound); in eb_reserve()
690 list_add(&vma->exec_link, &last); in eb_reserve()
692 list_add_tail(&vma->exec_link, &last); in eb_reserve()
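
When a reservation pass runs out of space, eb_reserve() unbinds everything and re-sorts the unbound list so the hardest placements are attempted first, which is what the list_add()/list_add_tail() fragments above implement. A sketch of that sorting step; the flag names and the local `last` list are assumed from the surrounding source:

	struct list_head last;
	unsigned int i;

	INIT_LIST_HEAD(&eb->unbound);
	INIT_LIST_HEAD(&last);

	for (i = 0; i < eb->buffer_count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		eb_unreserve_vma(vma, &eb->flags[i]);

		if (flags & EXEC_OBJECT_PINNED)
			/* Soft-pinned: must land exactly at its offset. */
			list_add(&vma->exec_link, &eb->unbound);
		else if (flags & __EXEC_OBJECT_NEEDS_MAP)
			/* Competes for the limited mappable aperture. */
			list_add_tail(&vma->exec_link, &eb->unbound);
		else if (flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)
			/* Most flexible: can go anywhere in the address space. */
			list_add(&vma->exec_link, &last);
		else
			list_add_tail(&vma->exec_link, &last);
	}
	list_splice_tail(&last, &eb->unbound);
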
764 struct i915_vma *vma; in eb_lookup_vmas() local
766 vma = radix_tree_lookup(handles_vma, handle); in eb_lookup_vmas()
767 if (likely(vma)) in eb_lookup_vmas()
776 vma = i915_vma_instance(obj, eb->context->vm, NULL); in eb_lookup_vmas()
777 if (IS_ERR(vma)) { in eb_lookup_vmas()
778 err = PTR_ERR(vma); in eb_lookup_vmas()
788 err = radix_tree_insert(handles_vma, handle, vma); in eb_lookup_vmas()
795 if (!atomic_fetch_inc(&vma->open_count)) in eb_lookup_vmas()
796 i915_vma_reopen(vma); in eb_lookup_vmas()
805 err = eb_add_vma(eb, i, batch, vma); in eb_lookup_vmas()
809 GEM_BUG_ON(vma != eb->vma[i]); in eb_lookup_vmas()
810 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); in eb_lookup_vmas()
811 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && in eb_lookup_vmas()
812 eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); in eb_lookup_vmas()
823 eb->vma[i] = NULL; in eb_lookup_vmas()
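
eb_lookup_vmas() resolves each GEM handle through a per-context radix tree cache, falling back to the object lookup and i915_vma_instance() only on a miss. A simplified sketch of the per-handle body; field names such as eb->file, the goto labels and the error unwinding are assumed or trimmed:

	vma = radix_tree_lookup(handles_vma, handle);
	if (likely(vma))
		goto add_vma;		/* cached from a previous execbuf */

	obj = i915_gem_object_lookup(eb->file, handle);
	if (unlikely(!obj))
		return -ENOENT;

	vma = i915_vma_instance(obj, eb->context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = radix_tree_insert(handles_vma, handle, vma);
	if (unlikely(err))
		return err;

add_vma:
	err = eb_add_vma(eb, i, batch, vma);
	if (unlikely(err))
		return err;
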
835 return eb->vma[handle]; in eb_get_vma()
838 struct i915_vma *vma; in eb_get_vma() local
841 hlist_for_each_entry(vma, head, exec_node) { in eb_get_vma()
842 if (vma->exec_handle == handle) in eb_get_vma()
843 return vma; in eb_get_vma()
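
eb_get_vma() has two lookup modes: a direct index into eb->vma[] when userspace supplied dense, LUT-style handles, and otherwise a walk of the hash chain that eb_add_vma() built via vma->exec_handle / exec_node. A sketch of the hashed path only; the bucket and lut_size field names are assumed from the surrounding source:

	struct hlist_head *head;
	struct i915_vma *vma;

	head = &eb->buckets[hash_32(handle, eb->lut_size)];
	hlist_for_each_entry(vma, head, exec_node) {
		if (vma->exec_handle == handle)
			return vma;
	}

	return NULL;
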
855 struct i915_vma *vma = eb->vma[i]; in eb_release_vmas() local
858 if (!vma) in eb_release_vmas()
861 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); in eb_release_vmas()
862 vma->exec_flags = NULL; in eb_release_vmas()
863 eb->vma[i] = NULL; in eb_release_vmas()
866 __eb_unreserve_vma(vma, flags); in eb_release_vmas()
869 i915_vma_put(vma); in eb_release_vmas()
1026 struct i915_vma *vma; in reloc_iomap() local
1041 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, in reloc_iomap()
1045 if (IS_ERR(vma)) { in reloc_iomap()
1055 cache->node.start = vma->node.start; in reloc_iomap()
1056 cache->node.mm = (void *)vma; in reloc_iomap()
1119 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) in reloc_move_to_gpu() argument
1121 struct drm_i915_gem_object *obj = vma->obj; in reloc_move_to_gpu()
1124 i915_vma_lock(vma); in reloc_move_to_gpu()
1130 err = i915_request_await_object(rq, vma->obj, true); in reloc_move_to_gpu()
1132 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); in reloc_move_to_gpu()
1134 i915_vma_unlock(vma); in reloc_move_to_gpu()
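
reloc_move_to_gpu() shows the standard pattern for handing a buffer to a request: take the vma's reservation lock, serialise the request against existing work on the object, then mark the vma active as a GPU write (the relocation batch will store into it). A sketch close to the fragments above, with the cache-flush handling omitted:

static int reloc_move_to_gpu_sketch(struct i915_request *rq, struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	i915_vma_lock(vma);

	/* Wait for (or pipeline behind) whoever is using the object now. */
	err = i915_request_await_object(rq, obj, true /* write */);
	if (err == 0)
		/* Track the GPU write so later waits and reads see it. */
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_vma_unlock(vma);

	return err;
}
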
1140 struct i915_vma *vma, in __reloc_gpu_alloc() argument
1163 batch = i915_vma_instance(pool->obj, vma->vm, NULL); in __reloc_gpu_alloc()
1183 err = reloc_move_to_gpu(rq, vma); in __reloc_gpu_alloc()
1225 struct i915_vma *vma, in reloc_gpu() argument
1244 err = __reloc_gpu_alloc(eb, vma, len); in reloc_gpu()
1256 relocate_entry(struct i915_vma *vma, in relocate_entry() argument
1268 !dma_resv_test_signaled_rcu(vma->resv, true))) { in relocate_entry()
1281 batch = reloc_gpu(eb, vma, len); in relocate_entry()
1285 addr = gen8_canonical_addr(vma->node.start + offset); in relocate_entry()
1326 vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT); in relocate_entry()
1347 struct i915_vma *vma, in eb_relocate_entry() argument
1410 vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { in eb_relocate_entry()
1415 (int)vma->size); in eb_relocate_entry()
1434 *vma->exec_flags &= ~EXEC_OBJECT_ASYNC; in eb_relocate_entry()
1437 return relocate_entry(vma, reloc, eb, target); in eb_relocate_entry()
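
relocate_entry() picks between two ways of patching a relocation: if the buffer is still busy on the GPU (its reservation object is not fully signaled), a small store-dword batch built via reloc_gpu() writes the new address from the GPU side; otherwise the CPU patches it directly through the cached mapping returned by reloc_vaddr(). A much-simplified decision sketch; `len` and the per-generation command encoding are left as comments and are not shown in this listing:

	const u64 offset = reloc->offset;

	if (!eb->reloc_cache.vaddr &&
	    !dma_resv_test_signaled_rcu(vma->resv, true)) {
		/* GPU path: emit a store-dword into a pool batch. */
		u32 *batch = reloc_gpu(eb, vma, len);

		if (!IS_ERR(batch)) {
			u64 addr = gen8_canonical_addr(vma->node.start + offset);

			/* fill in the gen-specific MI_STORE_DWORD at addr */
		}
	} else {
		/* CPU path: patch the dword(s) through a kmap/iomap cache. */
		void *vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache,
					  offset >> PAGE_SHIFT);

		if (!IS_ERR(vaddr)) {
			/* write the target offset at vaddr + offset_in_page(offset) */
		}
	}
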
1440 static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) in eb_relocate_vma() argument
1445 const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); in eb_relocate_vma()
1485 u64 offset = eb_relocate_entry(eb, vma, r); in eb_relocate_vma()
1528 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma) in eb_relocate_vma_slow() argument
1530 const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); in eb_relocate_vma_slow()
1537 u64 offset = eb_relocate_entry(eb, vma, &relocs[i]); in eb_relocate_vma_slow()
1681 struct i915_vma *vma; in eb_relocate_slow() local
1737 list_for_each_entry(vma, &eb->relocs, reloc_link) { in eb_relocate_slow()
1740 err = eb_relocate_vma(eb, vma); in eb_relocate_slow()
1745 err = eb_relocate_vma_slow(eb, vma); in eb_relocate_slow()
1790 struct i915_vma *vma; in eb_relocate() local
1792 list_for_each_entry(vma, &eb->relocs, reloc_link) { in eb_relocate()
1793 if (eb_relocate_vma(eb, vma)) in eb_relocate()
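
eb_relocate() simply walks the list that eb_add_vma() built (reloc_link entries on eb->relocs) and drops to the slow path, which copies the user relocation arrays in stages that may fault, as soon as anything goes wrong. A sketch; the __EXEC_HAS_RELOC shortcut flag is assumed from the surrounding source:

static int eb_relocate_sketch(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* Objects are at their final addresses; patch the batches. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}
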
1814 struct i915_vma *vma = eb->vma[i]; in eb_move_to_gpu() local
1816 err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire); in eb_move_to_gpu()
1827 ww_mutex_unlock(&eb->vma[j]->resv->lock); in eb_move_to_gpu()
1830 swap(eb->vma[i], eb->vma[j]); in eb_move_to_gpu()
1831 eb->vma[i]->exec_flags = &eb->flags[i]; in eb_move_to_gpu()
1833 GEM_BUG_ON(vma != eb->vma[0]); in eb_move_to_gpu()
1834 vma->exec_flags = &eb->flags[0]; in eb_move_to_gpu()
1836 err = ww_mutex_lock_slow_interruptible(&vma->resv->lock, in eb_move_to_gpu()
1846 struct i915_vma *vma = eb->vma[i]; in eb_move_to_gpu() local
1847 struct drm_i915_gem_object *obj = vma->obj; in eb_move_to_gpu()
1849 assert_vma_held(vma); in eb_move_to_gpu()
1857 capture->vma = vma; in eb_move_to_gpu()
1885 err = i915_vma_move_to_active(vma, eb->request, flags); in eb_move_to_gpu()
1887 i915_vma_unlock(vma); in eb_move_to_gpu()
1889 __eb_unreserve_vma(vma, flags); in eb_move_to_gpu()
1890 vma->exec_flags = NULL; in eb_move_to_gpu()
1893 i915_vma_put(vma); in eb_move_to_gpu()
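
The first half of the eb_move_to_gpu() fragments above is the classic ww_mutex acquire loop over every object's reservation lock: on -EDEADLK, every lock already held is dropped, the contended vma (and its flags slot) is swapped down to slot 0, taken with the _slow variant, and the loop restarts; the second half then flushes and moves each object to the request. A sketch of just the backoff, with the acquire context setup and loop framing assumed:

		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
		if (err == -EDEADLK) {
			/* Back off: release everything we already hold... */
			do {
				int j = i - 1;

				ww_mutex_unlock(&eb->vma[j]->resv->lock);

				/* ...while moving the contended vma to slot 0. */
				swap(eb->flags[i], eb->flags[j]);
				swap(eb->vma[i], eb->vma[j]);
				eb->vma[i]->exec_flags = &eb->flags[i];
			} while (--i);

			vma->exec_flags = &eb->flags[0];

			/* Sleep on the contended lock, then retry the rest. */
			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
		}
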
1964 struct i915_vma * const vma = *eb->vma; in shadow_batch_pin() local
1975 } else if (vma->vm->has_read_only) { in shadow_batch_pin()
1977 vm = vma->vm; in shadow_batch_pin()
1990 struct i915_vma *vma; in eb_parse() local
1999 vma = shadow_batch_pin(eb, pool->obj); in eb_parse()
2000 if (IS_ERR(vma)) in eb_parse()
2006 shadow_batch_start = gen8_canonical_addr(vma->node.start); in eb_parse()
2018 i915_vma_unpin(vma); in eb_parse()
2028 vma = NULL; in eb_parse()
2030 vma = ERR_PTR(err); in eb_parse()
2034 eb->vma[eb->buffer_count] = i915_vma_get(vma); in eb_parse()
2037 vma->exec_flags = &eb->flags[eb->buffer_count]; in eb_parse()
2041 eb->batch = vma; in eb_parse()
2048 vma->private = pool; in eb_parse()
2049 return vma; in eb_parse()
2053 return vma; in eb_parse()
2499 eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1); in i915_gem_do_execbuffer()
2500 eb.vma[0] = NULL; in i915_gem_do_execbuffer()
2501 eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); in i915_gem_do_execbuffer()
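
The three lines above carve a single allocation into three parallel arrays, each with buffer_count + 1 slots. A layout sketch; the purpose of the spare slot (taken later by eb_parse() for the shadow batch, see the eb->vma[eb->buffer_count] fragment above) and of the NULL sentinel are inferred from the other fragments in this listing:

	/*
	 *   exec[0 .. count]   struct drm_i915_gem_exec_object2  (from userspace)
	 *   vma[0 .. count]    struct i915_vma *                 (= exec + count + 1)
	 *   flags[0 .. count]  unsigned int                      (= vma + count + 1)
	 */
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;	/* lets eb_release_vmas() stop at the first
				 * slot eb_lookup_vmas() never filled */
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
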
2601 struct i915_vma *vma; in i915_gem_do_execbuffer() local
2603 vma = eb_parse(&eb); in i915_gem_do_execbuffer()
2604 if (IS_ERR(vma)) { in i915_gem_do_execbuffer()
2605 err = PTR_ERR(vma); in i915_gem_do_execbuffer()
2615 struct i915_vma *vma; in i915_gem_do_execbuffer() local
2627 vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); in i915_gem_do_execbuffer()
2628 if (IS_ERR(vma)) { in i915_gem_do_execbuffer()
2629 err = PTR_ERR(vma); in i915_gem_do_execbuffer()
2633 eb.batch = vma; in i915_gem_do_execbuffer()