/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/rbtree.h>

#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM			16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev)	(1 << (adev)->vm_manager.block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE	32768

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

/* fragment hint, log2 of the fragment size in 4 KiB pages */
#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* maximum number of VM hubs */
#define AMDGPU_MAX_VMHUBS	2
#define AMDGPU_GFXHUB		0
#define AMDGPU_MMHUB		1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(8 << 20)

/* maximum number of VMIDs reserved per process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
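/*
 * Illustrative sketch, not part of the upstream header: how the PTE flag
 * bits above are typically combined when building a page table entry for
 * a CPU-coherent system page.  The helper name is hypothetical and only
 * demonstrates the macro usage; the real flag selection lives in
 * amdgpu_vm.c and the per-ASIC GMC code.
 */
static inline uint64_t amdgpu_vm_example_gtt_pte_flags(unsigned int frag)
{
	/* valid, snooped system page, readable and writeable, with a
	 * fragment hint of 2^frag contiguous 4 KiB pages
	 */
	return AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	       AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
	       AMDGPU_PTE_FRAG(frag);
}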
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm	*vm;
	struct amdgpu_bo	*bo;

	/* protected by bo being reserved */
	struct list_head	bo_list;

	/* protected by spinlock */
	struct list_head	vm_status;
};

struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt	*entries;
	unsigned		last_entry_used;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* protects the moved and cleared lists */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* BOs cleared in the PT because of a move */
	struct list_head	cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_dir_update;
	uint64_t		last_eviction_counter;

	/* protects the freed list */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;

	/* client id */
	u64			client_id;
	/* VMIDs reserved for this VM */
	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;
};

struct amdgpu_vm_id {
	struct list_head	list;
	struct amdgpu_sync	active;
	struct dma_fence	*last_flush;
	atomic64_t		owner;

	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct dma_fence	*flushed_updates;

	uint32_t		current_gpu_reset_count;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_id_manager {
	struct mutex		lock;
	unsigned		num_ids;
	struct list_head	ids_lru;
	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
	atomic_t		reserved_vmid_num;
};
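/*
 * Illustrative sketch, not part of the upstream header: descending one
 * level of the amdgpu_vm_pt tree rooted at amdgpu_vm.root.  The helper
 * name is hypothetical; callers derive "idx" from the virtual address
 * and the per-level shift (see amdgpu_vm_alloc_pts() and
 * amdgpu_vm_update_directories() below).
 */
static inline struct amdgpu_vm_pt *
amdgpu_vm_pt_example_child(struct amdgpu_vm_pt *parent, unsigned int idx)
{
	/* interior nodes carry an "entries" array with one slot per
	 * directory entry; leaf page tables have entries == NULL
	 */
	return parent->entries ? &parent->entries[idx] : NULL;
}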
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vm_id_manager	id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64				fence_context;
	unsigned			seqno[AMDGPU_MAX_RINGS];

	uint64_t			max_pfn;
	uint32_t			num_level;
	uint64_t			vm_size;
	uint32_t			block_size;
	uint32_t			fragment_size;
	/* vram base address for page table entry */
	u64				vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring		*vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned			vm_pte_num_rings;
	atomic_t			vm_pte_next_ring;
	/* client id counter */
	atomic64_t			client_counter;

	/* partial resident texture handling */
	spinlock_t			prt_lock;
	atomic_t			num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute:
	 * bit 0: 0 = Graphics tables updated by SDMA, 1 = by CPU
	 * bit 1: 0 = Compute tables updated by SDMA, 1 = by CPU
	 */
	int				vm_update_mode;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid);
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
				 uint32_t fragment_size_default);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
			   uint32_t fragment_size_default);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

#endif /* __AMDGPU_VM_H__ */
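/*
 * Illustrative call flow, a sketch only and not documentation of a stable
 * API: mapping a BO into a VM typically pairs amdgpu_vm_bo_add() with
 * amdgpu_vm_bo_map(); the PTEs only become valid once the directories and
 * the mapping itself have been updated.  Roughly (error handling omitted,
 * exact ordering may differ in the driver):
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	r = amdgpu_vm_bo_map(adev, bo_va, saddr, 0, size,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	r = amdgpu_vm_update_directories(adev, vm);
 *	r = amdgpu_vm_bo_update(adev, bo_va, false);
 */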