/drivers/gpu/drm/qxl/
D | qxl_ttm.c |
     41  struct qxl_mman *mman;  in qxl_get_qdev() local
     44  mman = container_of(bdev, struct qxl_mman, bdev);  in qxl_get_qdev()
     45  qdev = container_of(mman, struct qxl_device, mman);  in qxl_get_qdev()
     64  qdev->mman.mem_global_referenced = false;  in qxl_ttm_global_init()
     65  global_ref = &qdev->mman.mem_global_ref;  in qxl_ttm_global_init()
     78  qdev->mman.bo_global_ref.mem_glob =  in qxl_ttm_global_init()
     79      qdev->mman.mem_global_ref.object;  in qxl_ttm_global_init()
     80  global_ref = &qdev->mman.bo_global_ref.ref;  in qxl_ttm_global_init()
     88  drm_global_item_unref(&qdev->mman.mem_global_ref);  in qxl_ttm_global_init()
     92  qdev->mman.mem_global_referenced = true;  in qxl_ttm_global_init()
    [all …]
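The qxl_get_qdev() hits above show TTM's standard back-pointer idiom: the driver embeds the TTM device inside its own mman struct, so a ttm_bo_device pointer can be walked back out to the qxl_device. A minimal sketch assembled from the fragments at 41-45 (only the function signature is assumed; it is not shown in the listing):

	static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
	{
		struct qxl_mman *mman;
		struct qxl_device *qdev;

		/* bdev is embedded in qxl_mman, which is embedded in qxl_device */
		mman = container_of(bdev, struct qxl_mman, bdev);
		qdev = container_of(mman, struct qxl_device, mman);
		return qdev;
	}

virtgpu_ttm.c and radeon_ttm.c below repeat the same two-step walk; amdgpu collapses it into a single container_of() (see amdgpu.h further down).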
D | qxl_object.c |
    111  r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,  in qxl_bo_create()
    351  return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);  in qxl_surf_evict()
    356  return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);  in qxl_vram_evict()
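The two eviction hits at 351/356 are complete function bodies: ttm_bo_evict_mm() moves every BO out of the named memory pool, and qxl keeps surfaces in TTM_PL_PRIV and ordinary BOs in TTM_PL_VRAM. A sketch, assuming the usual one-argument wrapper signatures:

	/* Evict all surface BOs from the private pool. */
	int qxl_surf_evict(struct qxl_device *qdev)
	{
		return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
	}

	/* Evict all ordinary BOs from VRAM. */
	int qxl_vram_evict(struct qxl_device *qdev)
	{
		return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
	}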
D | qxl_release.c |
    451  qdev = container_of(bdev, struct qxl_device, mman.bdev);  in qxl_release_fence_buffer_objects()
D | qxl_drv.h |
    256  struct qxl_mman mman;  member
/drivers/gpu/drm/virtio/
D | virtgpu_ttm.c |
     45  struct virtio_gpu_mman *mman;  in virtio_gpu_get_vgdev() local
     48  mman = container_of(bdev, struct virtio_gpu_mman, bdev);  in virtio_gpu_get_vgdev()
     49  vgdev = container_of(mman, struct virtio_gpu_device, mman);  in virtio_gpu_get_vgdev()
     68  vgdev->mman.mem_global_referenced = false;  in virtio_gpu_ttm_global_init()
     69  global_ref = &vgdev->mman.mem_global_ref;  in virtio_gpu_ttm_global_init()
     82  vgdev->mman.bo_global_ref.mem_glob =  in virtio_gpu_ttm_global_init()
     83      vgdev->mman.mem_global_ref.object;  in virtio_gpu_ttm_global_init()
     84  global_ref = &vgdev->mman.bo_global_ref.ref;  in virtio_gpu_ttm_global_init()
     92  drm_global_item_unref(&vgdev->mman.mem_global_ref);  in virtio_gpu_ttm_global_init()
     96  vgdev->mman.mem_global_referenced = true;  in virtio_gpu_ttm_global_init()
    [all …]
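The *_ttm_global_init() hits in qxl, virtio, radeon, and amdgpu all trace the same two-stage drm_global handshake: reference the TTM memory-accounting singleton, then link and reference the BO singleton, unwinding the first ref on failure. A sketch of the virtio instance; the callback names and error handling follow the conventional shape of this pattern rather than being copied from the listing:

	static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
	{
		struct drm_global_reference *global_ref;
		int r;

		/* Stage 1: take a ref on the shared TTM memory-accounting object. */
		vgdev->mman.mem_global_referenced = false;
		global_ref = &vgdev->mman.mem_global_ref;
		global_ref->global_type = DRM_GLOBAL_TTM_MEM;
		global_ref->size = sizeof(struct ttm_mem_global);
		global_ref->init = &virtio_gpu_ttm_mem_global_init;	/* assumed names */
		global_ref->release = &virtio_gpu_ttm_mem_global_release;
		r = drm_global_item_ref(global_ref);
		if (r != 0)
			return r;

		/* Stage 2: the BO singleton is tied to the memory object, then
		 * referenced in turn; on failure the first ref is dropped again. */
		vgdev->mman.bo_global_ref.mem_glob = vgdev->mman.mem_global_ref.object;
		global_ref = &vgdev->mman.bo_global_ref.ref;
		global_ref->global_type = DRM_GLOBAL_TTM_BO;
		global_ref->size = sizeof(struct ttm_bo_global);
		global_ref->init = &ttm_bo_global_init;
		global_ref->release = &ttm_bo_global_release;
		r = drm_global_item_ref(global_ref);
		if (r != 0) {
			drm_global_item_unref(&vgdev->mman.mem_global_ref);
			return r;
		}

		vgdev->mman.mem_global_referenced = true;
		return 0;
	}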
D | virtgpu_object.c |
     76  acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,  in virtio_gpu_object_create()
     91  ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,  in virtio_gpu_object_create()
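Both hits sit on TTM's standard BO-creation path: ttm_bo_dma_acc_size() reports how much accounting overhead a DMA-capable BO of this size carries, and the result feeds ttm_bo_init(). A fragment-level sketch; the trailing ttm_bo_init() arguments follow the TTM signature of this kernel generation and are an assumption, as is the destroy callback name:

	size = roundup(size, PAGE_SIZE);
	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
				       sizeof(struct virtio_gpu_object));
	/* ... BO allocation and placement setup elided ... */
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
			  &bo->placement, 0, !kernel, NULL, acc_size,
			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);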
D | virtgpu_drv.h |
    172  struct virtio_gpu_mman mman;  member
/drivers/gpu/drm/amd/amdgpu/
D | amdgpu_ttm.c |
     80  adev->mman.mem_global_referenced = false;  in amdgpu_ttm_global_init()
     81  global_ref = &adev->mman.mem_global_ref;  in amdgpu_ttm_global_init()
     93  adev->mman.bo_global_ref.mem_glob =  in amdgpu_ttm_global_init()
     94      adev->mman.mem_global_ref.object;  in amdgpu_ttm_global_init()
     95  global_ref = &adev->mman.bo_global_ref.ref;  in amdgpu_ttm_global_init()
    106  mutex_init(&adev->mman.gtt_window_lock);  in amdgpu_ttm_global_init()
    108  ring = adev->mman.buffer_funcs_ring;  in amdgpu_ttm_global_init()
    110  r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,  in amdgpu_ttm_global_init()
    117  adev->mman.mem_global_referenced = true;  in amdgpu_ttm_global_init()
    122  drm_global_item_unref(&adev->mman.bo_global_ref.ref);  in amdgpu_ttm_global_init()
    [all …]
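amdgpu's global init goes one step beyond the other drivers: after taking the drm_global refs it binds a GPU-scheduler entity to the buffer-move ring, so TTM copies and evictions are submitted through the scheduler. A sketch of that tail; the run-queue choice and the error label are assumptions based on this kernel generation, and only the quoted lines come from the listing:

	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	/* Buffer moves are kernel-internal work, so the entity is
	 * attached to the kernel-priority run queue (assumed). */
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;	/* hypothetical label */
	}

	adev->mman.mem_global_referenced = true;
	return 0;

	error_entity:
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);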
D | amdgpu_benchmark.c |
     41  struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;  in amdgpu_benchmark_do_move()
    110  if (adev->mman.buffer_funcs) {  in amdgpu_benchmark_move()
D | amdgpu_kms.c |
    423  ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);  in amdgpu_info_ioctl()
    426  ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);  in amdgpu_info_ioctl()
    429  ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);  in amdgpu_info_ioctl()
    452  vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;  in amdgpu_info_ioctl()
    466      amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);  in amdgpu_info_ioctl()
    475      amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);  in amdgpu_info_ioctl()
    479  mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;  in amdgpu_info_ioctl()
    484      amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);  in amdgpu_info_ioctl()
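All of these hits read accounting straight off the per-pool ttm_mem_type_manager slots embedded at mman.bdev.man[]; amdgpu_info_ioctl() just forwards the numbers to userspace. A sketch of three of the query arms, with the surrounding locals (info, out, size, ui64) assumed from context along with the AMDGPU_INFO_* uapi names and the ioctl's recurring copy_to_user return idiom:

	switch (info->query) {
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	}

amdgpu_cs.c below reads the same managers for its move-threshold heuristic.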
D | amdgpu_test.c |
     33  struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;  in amdgpu_do_test_moves()
    239  if (adev->mman.buffer_funcs)  in amdgpu_test_moves()
D | si_dma.c |
    136  if (adev->mman.buffer_funcs_ring == ring)  in si_dma_stop()
    199  if (adev->mman.buffer_funcs_ring == ring)  in si_dma_start()
    883  if (adev->mman.buffer_funcs == NULL) {  in si_dma_set_buffer_funcs()
    884      adev->mman.buffer_funcs = &si_dma_buffer_funcs;  in si_dma_set_buffer_funcs()
    885      adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;  in si_dma_set_buffer_funcs()
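Lines 883-885 are the entire registration: each SDMA generation volunteers its first instance as the device-wide buffer mover, but only if no earlier IP block already claimed the slot. The identical three lines recur in cik_sdma.c and sdma_v2_4/v3_0/v4_0 below; a sketch (only the enclosing function braces are assumed):

	static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
	{
		if (adev->mman.buffer_funcs == NULL) {
			adev->mman.buffer_funcs = &si_dma_buffer_funcs;
			adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
		}
	}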
D | cik_sdma.c |
    318  if ((adev->mman.buffer_funcs_ring == sdma0) ||  in cik_sdma_gfx_stop()
    319      (adev->mman.buffer_funcs_ring == sdma1))  in cik_sdma_gfx_stop()
    519  if (adev->mman.buffer_funcs_ring == ring)  in cik_sdma_gfx_resume()
   1383  if (adev->mman.buffer_funcs == NULL) {  in cik_sdma_set_buffer_funcs()
   1384      adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;  in cik_sdma_set_buffer_funcs()
   1385      adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;  in cik_sdma_set_buffer_funcs()
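The gfx_stop/gfx_resume hits only show the conditions, not the bodies. In amdgpu trees of this vintage the point of the check is VRAM sizing: while the buffer-move ring is down, TTM must not place BOs it could not later migrate, so the active VRAM window is clamped and then restored. A hedged sketch of what the bodies plausibly do; the amdgpu_ttm_set_active_vram_size() calls are an assumption, not shown in the listing:

	/* cik_sdma_gfx_stop(): the mover is going away; shrink the
	 * active VRAM window to what the CPU can still reach. */
	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	/* cik_sdma_gfx_resume(): the mover is back; expose all of VRAM. */
	if (adev->mman.buffer_funcs_ring == ring)
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);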
D | sdma_v2_4.c |
    349  if ((adev->mman.buffer_funcs_ring == sdma0) ||  in sdma_v2_4_gfx_stop()
    350      (adev->mman.buffer_funcs_ring == sdma1))  in sdma_v2_4_gfx_stop()
    495  if (adev->mman.buffer_funcs_ring == ring)  in sdma_v2_4_gfx_resume()
   1320  if (adev->mman.buffer_funcs == NULL) {  in sdma_v2_4_set_buffer_funcs()
   1321      adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;  in sdma_v2_4_set_buffer_funcs()
   1322      adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;  in sdma_v2_4_set_buffer_funcs()
D | sdma_v4_0.c |
    458  if ((adev->mman.buffer_funcs_ring == sdma0) ||  in sdma_v4_0_gfx_stop()
    459      (adev->mman.buffer_funcs_ring == sdma1))  in sdma_v4_0_gfx_stop()
    703  if (adev->mman.buffer_funcs_ring == ring)  in sdma_v4_0_gfx_resume()
   1710  if (adev->mman.buffer_funcs == NULL) {  in sdma_v4_0_set_buffer_funcs()
   1711      adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;  in sdma_v4_0_set_buffer_funcs()
   1712      adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;  in sdma_v4_0_set_buffer_funcs()
D | sdma_v3_0.c |
    516  if ((adev->mman.buffer_funcs_ring == sdma0) ||  in sdma_v3_0_gfx_stop()
    517      (adev->mman.buffer_funcs_ring == sdma1))  in sdma_v3_0_gfx_stop()
    738  if (adev->mman.buffer_funcs_ring == ring)  in sdma_v3_0_gfx_resume()
   1727  if (adev->mman.buffer_funcs == NULL) {  in sdma_v3_0_set_buffer_funcs()
   1728      adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;  in sdma_v3_0_set_buffer_funcs()
   1729      adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;  in sdma_v3_0_set_buffer_funcs()
D | amdgpu_object.c |
    336  acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,  in amdgpu_bo_create_restricted()
    393  r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,  in amdgpu_bo_create_restricted()
    808  return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);  in amdgpu_bo_evict_vram()
D | amdgpu_device.c |
   1976  adev->mman.buffer_funcs = NULL;  in amdgpu_device_init()
   1977  adev->mman.buffer_funcs_ring = NULL;  in amdgpu_device_init()
   2658  resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);  in amdgpu_sriov_gpu_reset()
   2717  ring = adev->mman.buffer_funcs_ring;  in amdgpu_sriov_gpu_reset()
   2758  ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);  in amdgpu_sriov_gpu_reset()
   2793  resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);  in amdgpu_gpu_reset()
   2864  struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;  in amdgpu_gpu_reset()
   2913  ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);  in amdgpu_gpu_reset()
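Both reset paths (and radeon_gpu_reset() / radeon_dynpm_idle_work_handler() further down) wrap their critical section in the same bracket: TTM's delayed-destroy workqueue is paused so it cannot free BOs mid-reset, and the saved state is handed back once the hardware is usable again. A minimal sketch of the bracket; only the two quoted calls are from the listing:

	int resched;

	/* Stop TTM's delayed BO destruction while the GPU is reset;
	 * the return value records whether work was pending. */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* ... hardware reset, ring tests, buffer recovery elided ... */

	/* Restart the workqueue, rescheduling if work had been pending. */
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);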
D | amdgpu.h |
   1484  struct amdgpu_mman mman;  member
   1595  return container_of(bdev, struct amdgpu_device, mman.bdev);  in amdgpu_ttm_adev()
   1772  #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib)…
   1773  #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib)…
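Where qxl, virtio, and radeon chain two container_of() calls through a local, amdgpu collapses the walk into one step, since container_of() accepts a nested member path. A sketch around the fragment at 1595 (the inline wrapper's signature is assumed):

	static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
	{
		/* bdev lives at adev->mman.bdev, so one container_of() suffices. */
		return container_of(bdev, struct amdgpu_device, mman.bdev);
	}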
D | amdgpu_cs.c |
    256  used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);  in amdgpu_cs_get_threshold_for_moves()
    303      amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);  in amdgpu_cs_get_threshold_for_moves()
/drivers/gpu/drm/radeon/
D | radeon_ttm.c |
     55  struct radeon_mman *mman;  in radeon_get_rdev() local
     58  mman = container_of(bdev, struct radeon_mman, bdev);  in radeon_get_rdev()
     59  rdev = container_of(mman, struct radeon_device, mman);  in radeon_get_rdev()
     82  rdev->mman.mem_global_referenced = false;  in radeon_ttm_global_init()
     83  global_ref = &rdev->mman.mem_global_ref;  in radeon_ttm_global_init()
     95  rdev->mman.bo_global_ref.mem_glob =  in radeon_ttm_global_init()
     96      rdev->mman.mem_global_ref.object;  in radeon_ttm_global_init()
     97  global_ref = &rdev->mman.bo_global_ref.ref;  in radeon_ttm_global_init()
    105  drm_global_item_unref(&rdev->mman.mem_global_ref);  in radeon_ttm_global_init()
    109  rdev->mman.mem_global_referenced = true;  in radeon_ttm_global_init()
    [all …]
D | radeon_object.c |
    203  acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,  in radeon_bo_create()
    263  r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,  in radeon_bo_create()
    432  return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);  in radeon_bo_evict_vram()
D | radeon_device.c |
   1833  resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);  in radeon_gpu_reset()
   1892  ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);  in radeon_gpu_reset()
D | radeon_gem.c |
    223  man = &rdev->mman.bdev.man[TTM_PL_VRAM];  in radeon_gem_info_ioctl()
D | radeon_pm.c |
   1800  resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);  in radeon_dynpm_idle_work_handler()
   1851  ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);  in radeon_dynpm_idle_work_handler()