
Lines matching "bb" (whole-word search, full:bb):

In prepare_shadow_batch_buffer():

419         struct intel_vgpu_shadow_bb *bb;                        (local declaration)
422         list_for_each_entry(bb, &workload->shadow_bb, list) {
430                 if (bb->bb_offset)
431                         bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
432                                 + bb->bb_offset;
434                 if (bb->ppgtt) {
435                         /* for non-priv bb, scan&shadow is only for
436                          * debugging purpose, so the content of shadow bb
437                          * is the same as original bb. Therefore,
438                          * here, rather than switch to shadow bb's gma
440                          * gma address, and send original bb to hardware
443                         if (bb->clflush & CLFLUSH_AFTER) {
444                                 drm_clflush_virt_range(bb->va,
445                                                        bb->obj->base.size);
446                                 bb->clflush &= ~CLFLUSH_AFTER;
448                         i915_gem_obj_finish_shmem_access(bb->obj);
449                         bb->accessing = false;
452                         bb->vma = i915_gem_object_ggtt_pin(bb->obj,
454                         if (IS_ERR(bb->vma)) {
455                                 ret = PTR_ERR(bb->vma);
460                         bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
462                         bb->bb_start_cmd_va[2] = 0;
464                         /* No one is going to touch shadow bb from now on. */
465                         if (bb->clflush & CLFLUSH_AFTER) {
466                                 drm_clflush_virt_range(bb->va,
467                                                        bb->obj->base.size);
468                                 bb->clflush &= ~CLFLUSH_AFTER;
471                         ret = i915_gem_object_set_to_gtt_domain(bb->obj,
476                         i915_gem_obj_finish_shmem_access(bb->obj);
477                         bb->accessing = false;
479                         ret = i915_vma_move_to_active(bb->vma,
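The split on bb->ppgtt above is the heart of the function: for a non-privileged batch buffer the shadow copy existed only so the command scanner could inspect it, so the code just flushes any pending CPU writes (CLFLUSH_AFTER) and lets the hardware execute the original guest buffer at its original graphics memory address; for a privileged one, the shadow object is pinned into the global GTT and the MI_BATCH_BUFFER_START command copied into the shadow ring buffer is patched through bb_start_cmd_va[1]/[2] to point at the shadow copy's GGTT offset before the vma is marked active. Below is a minimal user-space sketch of that relocation step; the shadow_bb struct, the fake_ggtt_pin() helper and the dword values are illustrative stand-ins, not the real i915/GVT types or API.

/*
 * Minimal user-space sketch of the relocation step, assuming a much
 * simplified shadow-bb structure. fake_ggtt_pin(), the field layout and
 * the dword values are hypothetical stand-ins for the real i915/GVT code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct shadow_bb {
	uint32_t *bb_start_cmd_va;  /* MI_BATCH_BUFFER_START copy in the shadow ring */
	bool ppgtt;                 /* non-privileged bb: keep the guest address */
};

/* Stand-in for i915_gem_object_ggtt_pin() + i915_ggtt_offset(). */
static uint32_t fake_ggtt_pin(void)
{
	return 0x00400000;  /* pretend the shadow bb was pinned at this GGTT offset */
}

static void relocate_bb_start(struct shadow_bb *bb)
{
	if (bb->ppgtt)
		return;  /* shadow copy was only for scanning; hardware runs the original */

	/* dword 1: low 32 bits of the new start address; dword 2: upper bits */
	bb->bb_start_cmd_va[1] = fake_ggtt_pin();
	bb->bb_start_cmd_va[2] = 0;
}

int main(void)
{
	/* dword 0 stands in for the MI_BATCH_BUFFER_START header */
	uint32_t ring_copy[3] = { 0x18800001, 0xdeadbeef, 0xdeadbeef };
	struct shadow_bb bb = { .bb_start_cmd_va = ring_copy, .ppgtt = false };

	relocate_bb_start(&bb);
	printf("patched start address: 0x%08x%08x\n", ring_copy[2], ring_copy[1]);
	return 0;
}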
In release_shadow_batch_buffer():

541         struct intel_vgpu_shadow_bb *bb, *pos;                  (local declaration)
546         bb = list_first_entry(&workload->shadow_bb,
551         list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
552                 if (bb->obj) {
553                         if (bb->accessing)
554                                 i915_gem_obj_finish_shmem_access(bb->obj);
556                         if (bb->va && !IS_ERR(bb->va))
557                                 i915_gem_object_unpin_map(bb->obj);
559                         if (bb->vma && !IS_ERR(bb->vma)) {
560                                 i915_vma_unpin(bb->vma);
561                                 i915_vma_close(bb->vma);
563                         __i915_gem_object_release_unless_active(bb->obj);
565                 list_del(&bb->list);
566                 kfree(bb);
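release_shadow_batch_buffer() must cope with entries left in any intermediate state, which is why each resource is released only if it was actually acquired (pending shmem access, the CPU map of bb->obj, the pinned vma, then the GEM object itself), and why the walk uses list_for_each_entry_safe(), which caches the next node in pos so the current one can be unlinked and freed mid-iteration. Here is a self-contained sketch of that delete-while-iterating pattern, with a hand-rolled singly linked list and free() standing in for the kernel's list helpers and kfree(); the names are hypothetical.

/*
 * Self-contained sketch of the delete-while-iterating pattern used by
 * release_shadow_batch_buffer(). A plain singly linked list and free()
 * stand in for list_for_each_entry_safe()/list_del()/kfree().
 */
#include <stdio.h>
#include <stdlib.h>

struct shadow_bb {
	void *obj;               /* stands in for bb->obj / bb->va / bb->vma */
	struct shadow_bb *next;
};

static void release_all(struct shadow_bb **head)
{
	struct shadow_bb *bb = *head, *pos;

	/* like list_for_each_entry_safe(): remember the next node before freeing */
	while (bb) {
		pos = bb->next;
		if (bb->obj)
			free(bb->obj);   /* analogue of unmap/unpin/release of bb->obj */
		free(bb);                /* analogue of list_del(&bb->list) + kfree(bb) */
		bb = pos;
	}
	*head = NULL;
}

int main(void)
{
	struct shadow_bb *head = NULL;

	/* build a few fake shadow batch buffers */
	for (int i = 0; i < 3; i++) {
		struct shadow_bb *bb = calloc(1, sizeof(*bb));
		if (!bb)
			return 1;
		bb->obj = malloc(16);
		bb->next = head;
		head = bb;
	}

	release_all(&head);
	printf("all shadow bbs released\n");
	return 0;
}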