1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/fence-array.h>
29 #include <drm/drmP.h>
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu.h"
32 #include "amdgpu_trace.h"
33 
34 /*
35  * GPUVM
36  * GPUVM is similar to the legacy GART on older ASICs; however,
37  * rather than there being a single global GART table
38  * for the entire GPU, there are multiple VM page tables active
39  * at any given time.  The VM page tables can contain a mix of
40  * VRAM pages and system memory pages, and system memory pages
41  * can be mapped as snooped (cached system pages) or unsnooped
42  * (uncached system pages).
43  * Each VM has an ID associated with it and there is a page table
44  * associated with each VMID.  When executing a command buffer,
45  * the kernel tells the ring which VMID to use for that command
46  * buffer.  VMIDs are allocated dynamically as commands are submitted.
47  * The userspace drivers maintain their own address space and the kernel
48  * sets up their page tables accordingly when they submit their
49  * command buffers and a VMID is assigned.
50  * Cayman/Trinity support up to 8 active VMs at any given time;
51  * SI supports 16.
52  */
53 
54 /* Local structure. Encapsulate some VM table update parameters to reduce
55  * the number of function parameters
56  */
57 struct amdgpu_pte_update_params {
58 	/* amdgpu device we do this update for */
59 	struct amdgpu_device *adev;
60 	/* address where to copy page table entries from */
61 	uint64_t src;
62 	/* indirect buffer to fill with commands */
63 	struct amdgpu_ib *ib;
64 	/* Function which actually does the update */
65 	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
66 		     uint64_t addr, unsigned count, uint32_t incr,
67 		     uint32_t flags);
68 	/* indicate update pt or its shadow */
69 	bool shadow;
70 };
71 
72 /**
73  * amdgpu_vm_num_pdes - return the number of page directory entries
74  *
75  * @adev: amdgpu_device pointer
76  *
77  * Calculate the number of page directory entries.
78  */
79 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
80 {
81 	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
82 }
83 
84 /**
85  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
86  *
87  * @adev: amdgpu_device pointer
88  *
89  * Calculate the size of the page directory in bytes.
90  */
91 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
92 {
93 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
94 }
95 
96 /**
97  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
98  *
99  * @vm: vm providing the BOs
100  * @validated: head of validation list
101  * @entry: entry to add
102  *
103  * Add the page directory to the list of BOs to
104  * validate for command submission.
105  */
106 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
107 			 struct list_head *validated,
108 			 struct amdgpu_bo_list_entry *entry)
109 {
110 	entry->robj = vm->page_directory;
111 	entry->priority = 0;
112 	entry->tv.bo = &vm->page_directory->tbo;
113 	entry->tv.shared = true;
114 	entry->user_pages = NULL;
115 	list_add(&entry->tv.head, validated);
116 }
117 
118 /**
119  * amdgpu_vm_get_pt_bos - add the VM page table BOs to a duplicates list
120  *
121  * @adev: amdgpu device pointer
122  * @vm: vm providing the BOs
123  * @duplicates: head of duplicates list
124  *
125  * Add the page table BOs to the duplicates list
126  * for command submission.
127  */
128 void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
129 			  struct list_head *duplicates)
130 {
131 	uint64_t num_evictions;
132 	unsigned i;
133 
134 	/* We only need to validate the page tables
135 	 * if they aren't already valid.
136 	 */
137 	num_evictions = atomic64_read(&adev->num_evictions);
138 	if (num_evictions == vm->last_eviction_counter)
139 		return;
140 
141 	/* add the vm page table to the list */
142 	for (i = 0; i <= vm->max_pde_used; ++i) {
143 		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
144 
145 		if (!entry->robj)
146 			continue;
147 
148 		list_add(&entry->tv.head, duplicates);
149 	}
150 
151 }
152 
153 /**
154  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
155  *
156  * @adev: amdgpu device instance
157  * @vm: vm providing the BOs
158  *
159  * Move the PT BOs to the tail of the LRU.
160  */
161 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
162 				  struct amdgpu_vm *vm)
163 {
164 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
165 	unsigned i;
166 
167 	spin_lock(&glob->lru_lock);
168 	for (i = 0; i <= vm->max_pde_used; ++i) {
169 		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
170 
171 		if (!entry->robj)
172 			continue;
173 
174 		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
175 	}
176 	spin_unlock(&glob->lru_lock);
177 }
178 
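/**
 * amdgpu_vm_is_gpu_reset - check if the VMID is stale after a GPU reset
 *
 * @adev: amdgpu_device pointer
 * @id: VMID to check
 *
 * Returns true if the GPU reset counter has changed since @id last recorded
 * it, i.e. a GPU reset happened and the ID state can no longer be trusted.
 */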
179 static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
180 			      struct amdgpu_vm_id *id)
181 {
182 	return id->current_gpu_reset_count !=
183 		atomic_read(&adev->gpu_reset_counter) ? true : false;
184 }
185 
186 /**
187  * amdgpu_vm_grab_id - allocate the next free VMID
188  *
189  * @vm: vm to allocate id for
190  * @ring: ring we want to submit job to
191  * @sync: sync object where we add dependencies
192  * @fence: fence protecting ID from reuse
193  *
194  * Allocate an id for the vm, adding fences to the sync obj as necessary.
195  */
196 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
197 		      struct amdgpu_sync *sync, struct fence *fence,
198 		      struct amdgpu_job *job)
199 {
200 	struct amdgpu_device *adev = ring->adev;
201 	uint64_t fence_context = adev->fence_context + ring->idx;
202 	struct fence *updates = sync->last_vm_update;
203 	struct amdgpu_vm_id *id, *idle;
204 	struct fence **fences;
205 	unsigned i;
206 	int r = 0;
207 
208 	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
209 			       GFP_KERNEL);
210 	if (!fences)
211 		return -ENOMEM;
212 
213 	mutex_lock(&adev->vm_manager.lock);
214 
215 	/* Check if we have an idle VMID */
216 	i = 0;
217 	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
218 		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
219 		if (!fences[i])
220 			break;
221 		++i;
222 	}
223 
224 	/* If we can't find an idle VMID to use, wait until one becomes available */
225 	if (&idle->list == &adev->vm_manager.ids_lru) {
226 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
227 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
228 		struct fence_array *array;
229 		unsigned j;
230 
231 		for (j = 0; j < i; ++j)
232 			fence_get(fences[j]);
233 
234 		array = fence_array_create(i, fences, fence_context,
235 					   seqno, true);
236 		if (!array) {
237 			for (j = 0; j < i; ++j)
238 				fence_put(fences[j]);
239 			kfree(fences);
240 			r = -ENOMEM;
241 			goto error;
242 		}
243 
244 
245 		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
246 		fence_put(&array->base);
247 		if (r)
248 			goto error;
249 
250 		mutex_unlock(&adev->vm_manager.lock);
251 		return 0;
252 
253 	}
254 	kfree(fences);
255 
256 	job->vm_needs_flush = true;
257 	/* Check if we can use a VMID already assigned to this VM */
258 	i = ring->idx;
259 	do {
260 		struct fence *flushed;
261 
262 		id = vm->ids[i++];
263 		if (i == AMDGPU_MAX_RINGS)
264 			i = 0;
265 
266 		/* Check all the prerequisites to using this VMID */
267 		if (!id)
268 			continue;
269 		if (amdgpu_vm_is_gpu_reset(adev, id))
270 			continue;
271 
272 		if (atomic64_read(&id->owner) != vm->client_id)
273 			continue;
274 
275 		if (job->vm_pd_addr != id->pd_gpu_addr)
276 			continue;
277 
278 		if (!id->last_flush)
279 			continue;
280 
281 		if (id->last_flush->context != fence_context &&
282 		    !fence_is_signaled(id->last_flush))
283 			continue;
284 
285 		flushed  = id->flushed_updates;
286 		if (updates &&
287 		    (!flushed || fence_is_later(updates, flushed)))
288 			continue;
289 
290 		/* Good we can use this VMID. Remember this submission as
291 		 * user of the VMID.
292 		 */
293 		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
294 		if (r)
295 			goto error;
296 
297 		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
298 		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
299 		vm->ids[ring->idx] = id;
300 
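		/* the VMID is simply the index into the manager's ids array */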
301 		job->vm_id = id - adev->vm_manager.ids;
302 		job->vm_needs_flush = false;
303 		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
304 
305 		mutex_unlock(&adev->vm_manager.lock);
306 		return 0;
307 
308 	} while (i != ring->idx);
309 
310 	/* Still no ID to use? Then use the idle one found earlier */
311 	id = idle;
312 
313 	/* Remember this submission as user of the VMID */
314 	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
315 	if (r)
316 		goto error;
317 
318 	fence_put(id->first);
319 	id->first = fence_get(fence);
320 
321 	fence_put(id->last_flush);
322 	id->last_flush = NULL;
323 
324 	fence_put(id->flushed_updates);
325 	id->flushed_updates = fence_get(updates);
326 
327 	id->pd_gpu_addr = job->vm_pd_addr;
328 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
329 	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
330 	atomic64_set(&id->owner, vm->client_id);
331 	vm->ids[ring->idx] = id;
332 
333 	job->vm_id = id - adev->vm_manager.ids;
334 	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
335 
336 error:
337 	mutex_unlock(&adev->vm_manager.lock);
338 	return r;
339 }
340 
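/**
 * amdgpu_vm_ring_has_compute_vm_bug - check if a VM flush workaround is needed
 *
 * @ring: ring to check
 *
 * Returns true if @ring is a compute ring and the GFX block still has the
 * compute VM bug (gfx7, or gfx8 with MEC firmware older than version 673),
 * in which case the caller emits an extra pipeline sync before the VM flush.
 */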
341 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
342 {
343 	struct amdgpu_device *adev = ring->adev;
344 	const struct amdgpu_ip_block_version *ip_block;
345 
346 	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
347 		/* only compute rings */
348 		return false;
349 
350 	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
351 	if (!ip_block)
352 		return false;
353 
354 	if (ip_block->major <= 7) {
355 		/* gfx7 has no workaround */
356 		return true;
357 	} else if (ip_block->major == 8) {
358 		if (adev->gfx.mec_fw_version >= 673)
359 			/* gfx8 is fixed in MEC firmware 673 */
360 			return false;
361 		else
362 			return true;
363 	}
364 	return false;
365 }
366 
367 /**
368  * amdgpu_vm_flush - hardware flush the vm
369  *
370  * @ring: ring to use for flush
371  * @job: job carrying the vm_id and pd_addr used for the flush
373  *
374  * Emit a VM flush when it is necessary.
375  */
376 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
377 {
378 	struct amdgpu_device *adev = ring->adev;
379 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
380 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
381 		id->gds_base != job->gds_base ||
382 		id->gds_size != job->gds_size ||
383 		id->gws_base != job->gws_base ||
384 		id->gws_size != job->gws_size ||
385 		id->oa_base != job->oa_base ||
386 		id->oa_size != job->oa_size);
387 	int r;
388 
389 	if (ring->funcs->emit_pipeline_sync && (
390 	    job->vm_needs_flush || gds_switch_needed ||
391 	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
392 		amdgpu_ring_emit_pipeline_sync(ring);
393 
394 	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
395 	    amdgpu_vm_is_gpu_reset(adev, id))) {
396 		struct fence *fence;
397 
398 		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
399 		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
400 
401 		r = amdgpu_fence_emit(ring, &fence);
402 		if (r)
403 			return r;
404 
405 		mutex_lock(&adev->vm_manager.lock);
406 		fence_put(id->last_flush);
407 		id->last_flush = fence;
408 		mutex_unlock(&adev->vm_manager.lock);
409 	}
410 
411 	if (gds_switch_needed) {
412 		id->gds_base = job->gds_base;
413 		id->gds_size = job->gds_size;
414 		id->gws_base = job->gws_base;
415 		id->gws_size = job->gws_size;
416 		id->oa_base = job->oa_base;
417 		id->oa_size = job->oa_size;
418 		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
419 					    job->gds_base, job->gds_size,
420 					    job->gws_base, job->gws_size,
421 					    job->oa_base, job->oa_size);
422 	}
423 
424 	return 0;
425 }
426 
427 /**
428  * amdgpu_vm_reset_id - reset VMID to zero
429  *
430  * @adev: amdgpu device structure
431  * @vm_id: vmid number to use
432  *
433  * Reset saved GDS, GWS and OA to force a switch on the next flush.
434  */
435 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
436 {
437 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
438 
439 	id->gds_base = 0;
440 	id->gds_size = 0;
441 	id->gws_base = 0;
442 	id->gws_size = 0;
443 	id->oa_base = 0;
444 	id->oa_size = 0;
445 }
446 
447 /**
448  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
449  *
450  * @vm: requested vm
451  * @bo: requested buffer object
452  *
453  * Find @bo inside the requested vm.
454  * Search inside the @bos vm list for the requested vm
455  * Returns the found bo_va or NULL if none is found
456  *
457  * Object has to be reserved!
458  */
459 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
460 				       struct amdgpu_bo *bo)
461 {
462 	struct amdgpu_bo_va *bo_va;
463 
464 	list_for_each_entry(bo_va, &bo->va, bo_list) {
465 		if (bo_va->vm == vm) {
466 			return bo_va;
467 		}
468 	}
469 	return NULL;
470 }
471 
472 /**
473  * amdgpu_vm_do_set_ptes - helper to call the right asic function
474  *
475  * @params: see amdgpu_pte_update_params definition
476  * @pe: addr of the page entry
477  * @addr: dst addr to write into pe
478  * @count: number of page entries to update
479  * @incr: increase next addr by incr bytes
480  * @flags: hw access flags
481  *
482  * Traces the parameters and calls the right asic functions
483  * to setup the page table using the DMA.
484  */
485 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
486 				  uint64_t pe, uint64_t addr,
487 				  unsigned count, uint32_t incr,
488 				  uint32_t flags)
489 {
490 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
491 
492 	if (count < 3) {
493 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
494 				    addr | flags, count, incr);
495 
496 	} else {
497 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
498 				      count, incr, flags);
499 	}
500 }
501 
502 /**
503  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
504  *
505  * @params: see amdgpu_pte_update_params definition
506  * @pe: addr of the page entry
507  * @addr: dst addr to write into pe
508  * @count: number of page entries to update
509  * @incr: increase next addr by incr bytes
510  * @flags: hw access flags
511  *
512  * Traces the parameters and calls the DMA function to copy the PTEs.
513  */
514 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
515 				   uint64_t pe, uint64_t addr,
516 				   unsigned count, uint32_t incr,
517 				   uint32_t flags)
518 {
519 	uint64_t src = (params->src + (addr >> 12) * 8);
520 
521 
522 	trace_amdgpu_vm_copy_ptes(pe, src, count);
523 
524 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
525 }
526 
527 /**
528  * amdgpu_vm_clear_bo - initially clear the page dir/table
529  *
530  * @adev: amdgpu_device pointer
531  * @bo: bo to clear
532  *
533  * The bo must be reserved before calling this function.
534  */
535 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
536 			      struct amdgpu_vm *vm,
537 			      struct amdgpu_bo *bo)
538 {
539 	struct amdgpu_ring *ring;
540 	struct fence *fence = NULL;
541 	struct amdgpu_job *job;
542 	struct amdgpu_pte_update_params params;
543 	unsigned entries;
544 	uint64_t addr;
545 	int r;
546 
547 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
548 
549 	r = reservation_object_reserve_shared(bo->tbo.resv);
550 	if (r)
551 		return r;
552 
553 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
554 	if (r)
555 		goto error;
556 
557 	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
558 	if (r)
559 		goto error;
560 
561 	addr = amdgpu_bo_gpu_offset(bo);
562 	entries = amdgpu_bo_size(bo) / 8;
563 
564 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
565 	if (r)
566 		goto error;
567 
568 	memset(&params, 0, sizeof(params));
569 	params.adev = adev;
570 	params.ib = &job->ibs[0];
571 	amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
572 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
573 
574 	WARN_ON(job->ibs[0].length_dw > 64);
575 	r = amdgpu_job_submit(job, ring, &vm->entity,
576 			      AMDGPU_FENCE_OWNER_VM, &fence);
577 	if (r)
578 		goto error_free;
579 
580 	amdgpu_bo_fence(bo, fence, true);
581 	fence_put(fence);
582 	return 0;
583 
584 error_free:
585 	amdgpu_job_free(job);
586 
587 error:
588 	return r;
589 }
590 
591 /**
592  * amdgpu_vm_map_gart - Resolve gart mapping of addr
593  *
594  * @pages_addr: optional DMA address to use for lookup
595  * @addr: the unmapped addr
596  *
597  * Look up the physical address of the page that the pte resolves
598  * to and return the pointer for the page table entry.
599  */
600 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
601 {
602 	uint64_t result;
603 
604 	/* page table offset */
605 	result = pages_addr[addr >> PAGE_SHIFT];
606 
607 	/* in case cpu page size != gpu page size */
608 	result |= addr & (~PAGE_MASK);
609 
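	/* strip off any flag bits so only the page address remains */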
610 	result &= 0xFFFFFFFFFFFFF000ULL;
611 
612 	return result;
613 }
614 
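/**
 * amdgpu_vm_update_pd_or_shadow - update the page directory or its shadow
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @shadow: if true, update the shadow copy of the page directory
 *
 * Walk the used page directory entries and write the GPU addresses of the
 * backing page table BOs into the (shadow) page directory.
 * Returns 0 for success, error for failure.
 */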
615 static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
616 					 struct amdgpu_vm *vm,
617 					 bool shadow)
618 {
619 	struct amdgpu_ring *ring;
620 	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
621 		vm->page_directory;
622 	uint64_t pd_addr;
623 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
624 	uint64_t last_pde = ~0, last_pt = ~0;
625 	unsigned count = 0, pt_idx, ndw;
626 	struct amdgpu_job *job;
627 	struct amdgpu_pte_update_params params;
628 	struct fence *fence = NULL;
629 
630 	int r;
631 
632 	if (!pd)
633 		return 0;
634 
635 	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
636 	if (r)
637 		return r;
638 
639 	pd_addr = amdgpu_bo_gpu_offset(pd);
640 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
641 
642 	/* padding, etc. */
643 	ndw = 64;
644 
645 	/* assume the worst case */
646 	ndw += vm->max_pde_used * 6;
647 
648 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
649 	if (r)
650 		return r;
651 
652 	memset(&params, 0, sizeof(params));
653 	params.adev = adev;
654 	params.ib = &job->ibs[0];
655 
656 	/* walk over the address space and update the page directory */
657 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
658 		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
659 		uint64_t pde, pt;
660 
661 		if (bo == NULL)
662 			continue;
663 
664 		if (bo->shadow) {
665 			struct amdgpu_bo *shadow = bo->shadow;
666 
667 			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
668 			if (r)
669 				return r;
670 		}
671 
672 		pt = amdgpu_bo_gpu_offset(bo);
673 		if (!shadow) {
674 			if (vm->page_tables[pt_idx].addr == pt)
675 				continue;
676 			vm->page_tables[pt_idx].addr = pt;
677 		} else {
678 			if (vm->page_tables[pt_idx].shadow_addr == pt)
679 				continue;
680 			vm->page_tables[pt_idx].shadow_addr = pt;
681 		}
682 
683 		pde = pd_addr + pt_idx * 8;
684 		if (((last_pde + 8 * count) != pde) ||
685 		    ((last_pt + incr * count) != pt) ||
686 		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
687 
688 			if (count) {
689 				amdgpu_vm_do_set_ptes(&params, last_pde,
690 						      last_pt, count, incr,
691 						      AMDGPU_PTE_VALID);
692 			}
693 
694 			count = 1;
695 			last_pde = pde;
696 			last_pt = pt;
697 		} else {
698 			++count;
699 		}
700 	}
701 
702 	if (count)
703 		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
704 				      count, incr, AMDGPU_PTE_VALID);
705 
706 	if (params.ib->length_dw != 0) {
707 		amdgpu_ring_pad_ib(ring, params.ib);
708 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
709 				 AMDGPU_FENCE_OWNER_VM);
710 		WARN_ON(params.ib->length_dw > ndw);
711 		r = amdgpu_job_submit(job, ring, &vm->entity,
712 				      AMDGPU_FENCE_OWNER_VM, &fence);
713 		if (r)
714 			goto error_free;
715 
716 		amdgpu_bo_fence(pd, fence, true);
717 		fence_put(vm->page_directory_fence);
718 		vm->page_directory_fence = fence_get(fence);
719 		fence_put(fence);
720 
721 	} else {
722 		amdgpu_job_free(job);
723 	}
724 
725 	return 0;
726 
727 error_free:
728 	amdgpu_job_free(job);
729 	return r;
730 }
731 
732 /*
733  * amdgpu_vm_update_page_directory - make sure that page directory is valid
734  *
735  * @adev: amdgpu_device pointer
736  * @vm: requested vm
739  *
740  * Allocates new page tables if necessary
741  * and updates the page directory.
742  * Returns 0 for success, error for failure.
743  */
744 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
745                                    struct amdgpu_vm *vm)
746 {
747 	int r = 0;
748 
749 	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
750 	if (r)
751 		return r;
752 	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
753 }
754 
755 /**
756  * amdgpu_vm_update_ptes - make sure that page tables are valid
757  *
758  * @params: see amdgpu_pte_update_params definition
759  * @vm: requested vm
760  * @start: start of GPU address range
761  * @end: end of GPU address range
762  * @dst: destination address to map to, the next dst inside the function
763  * @flags: mapping flags
764  *
765  * Update the page tables in the range @start - @end.
766  */
767 static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
768 				  struct amdgpu_vm *vm,
769 				  uint64_t start, uint64_t end,
770 				  uint64_t dst, uint32_t flags)
771 {
772 	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
773 
774 	uint64_t cur_pe_start, cur_nptes, cur_dst;
775 	uint64_t addr; /* next GPU address to be updated */
776 	uint64_t pt_idx;
777 	struct amdgpu_bo *pt;
778 	unsigned nptes; /* next number of ptes to be updated */
779 	uint64_t next_pe_start;
780 
781 	/* initialize the variables */
782 	addr = start;
783 	pt_idx = addr >> amdgpu_vm_block_size;
784 	pt = vm->page_tables[pt_idx].entry.robj;
785 	if (params->shadow) {
786 		if (!pt->shadow)
787 			return;
788 		pt = vm->page_tables[pt_idx].entry.robj->shadow;
789 	}
790 	if ((addr & ~mask) == (end & ~mask))
791 		nptes = end - addr;
792 	else
793 		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
794 
795 	cur_pe_start = amdgpu_bo_gpu_offset(pt);
796 	cur_pe_start += (addr & mask) * 8;
797 	cur_nptes = nptes;
798 	cur_dst = dst;
799 
800 	/* for next ptb*/
801 	addr += nptes;
802 	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
803 
804 	/* walk over the address space and update the page tables */
805 	while (addr < end) {
806 		pt_idx = addr >> amdgpu_vm_block_size;
807 		pt = vm->page_tables[pt_idx].entry.robj;
808 		if (params->shadow) {
809 			if (!pt->shadow)
810 				return;
811 			pt = vm->page_tables[pt_idx].entry.robj->shadow;
812 		}
813 
814 		if ((addr & ~mask) == (end & ~mask))
815 			nptes = end - addr;
816 		else
817 			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
818 
819 		next_pe_start = amdgpu_bo_gpu_offset(pt);
820 		next_pe_start += (addr & mask) * 8;
821 
822 		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
823 		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
824 			/* The next ptb is consecutive to the current ptb.
825 			 * Don't call the update function now;
826 			 * the two ptbs will be updated together later.
827 			 */
828 			cur_nptes += nptes;
829 		} else {
830 			params->func(params, cur_pe_start, cur_dst, cur_nptes,
831 				     AMDGPU_GPU_PAGE_SIZE, flags);
832 
833 			cur_pe_start = next_pe_start;
834 			cur_nptes = nptes;
835 			cur_dst = dst;
836 		}
837 
838 		/* for next ptb*/
839 		addr += nptes;
840 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
841 	}
842 
843 	params->func(params, cur_pe_start, cur_dst, cur_nptes,
844 		     AMDGPU_GPU_PAGE_SIZE, flags);
845 }
846 
847 /*
848  * amdgpu_vm_frag_ptes - add fragment information to PTEs
849  *
850  * @params: see amdgpu_pte_update_params definition
851  * @vm: requested vm
852  * @start: first PTE to handle
853  * @end: last PTE to handle
854  * @dst: addr those PTEs should point to
855  * @flags: hw mapping flags
856  */
857 static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
858 				struct amdgpu_vm *vm,
859 				uint64_t start, uint64_t end,
860 				uint64_t dst, uint32_t flags)
861 {
862 	/**
863 	 * The MC L1 TLB supports variable sized pages, based on a fragment
864 	 * field in the PTE. When this field is set to a non-zero value, page
865 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
866 	 * flags are considered valid for all PTEs within the fragment range
867 	 * and corresponding mappings are assumed to be physically contiguous.
868 	 *
869 	 * The L1 TLB can store a single PTE for the whole fragment,
870 	 * significantly increasing the space available for translation
871 	 * caching. This leads to large improvements in throughput when the
872 	 * TLB is under pressure.
873 	 *
874 	 * The L2 TLB distributes small and large fragments into two
875 	 * asymmetric partitions. The large fragment cache is significantly
876 	 * larger. Thus, we try to use large fragments wherever possible.
877 	 * Userspace can support this by aligning virtual base address and
878 	 * allocation size to the fragment size.
879 	 */
880 
881 	/* SI and newer are optimized for 64KB */
882 	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
883 	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
884 
885 	uint64_t frag_start = ALIGN(start, frag_align);
886 	uint64_t frag_end = end & ~(frag_align - 1);
887 
888 	/* system pages are not physically contiguous */
889 	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
890 	    (frag_start >= frag_end)) {
891 
892 		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
893 		return;
894 	}
895 
896 	/* handle the 4K area at the beginning */
897 	if (start != frag_start) {
898 		amdgpu_vm_update_ptes(params, vm, start, frag_start,
899 				      dst, flags);
900 		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
901 	}
902 
903 	/* handle the area in the middle */
904 	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
905 			      flags | frag_flags);
906 
907 	/* handle the 4K area at the end */
908 	if (frag_end != end) {
909 		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
910 		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
911 	}
912 }
913 
914 /**
915  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
916  *
917  * @adev: amdgpu_device pointer
918  * @exclusive: fence we need to sync to
919  * @src: address where to copy page table entries from
920  * @pages_addr: DMA addresses to use for mapping
921  * @vm: requested vm
922  * @start: start of mapped range
923  * @last: last mapped entry
924  * @flags: flags for the entries
925  * @addr: addr to set the area to
926  * @fence: optional resulting fence
927  *
928  * Fill in the page table entries between @start and @last.
929  * Returns 0 for success, -EINVAL for failure.
930  */
931 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
932 				       struct fence *exclusive,
933 				       uint64_t src,
934 				       dma_addr_t *pages_addr,
935 				       struct amdgpu_vm *vm,
936 				       uint64_t start, uint64_t last,
937 				       uint32_t flags, uint64_t addr,
938 				       struct fence **fence)
939 {
940 	struct amdgpu_ring *ring;
941 	void *owner = AMDGPU_FENCE_OWNER_VM;
942 	unsigned nptes, ncmds, ndw;
943 	struct amdgpu_job *job;
944 	struct amdgpu_pte_update_params params;
945 	struct fence *f = NULL;
946 	int r;
947 
948 	memset(&params, 0, sizeof(params));
949 	params.adev = adev;
950 	params.src = src;
951 
952 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
953 
958 	/* sync to everything on unmapping */
959 	if (!(flags & AMDGPU_PTE_VALID))
960 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
961 
962 	nptes = last - start + 1;
963 
964 	/*
965 	 * reserve space for one command every (1 << BLOCK_SIZE)
966 	 *  entries or 2k dwords (whatever is smaller)
967 	 */
968 	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
969 
970 	/* padding, etc. */
971 	ndw = 64;
972 
973 	if (src) {
974 		/* only copy commands needed */
975 		ndw += ncmds * 7;
976 
977 		params.func = amdgpu_vm_do_copy_ptes;
978 
979 	} else if (pages_addr) {
980 		/* copy commands needed */
981 		ndw += ncmds * 7;
982 
983 		/* and also PTEs */
984 		ndw += nptes * 2;
985 
986 		params.func = amdgpu_vm_do_copy_ptes;
987 
988 	} else {
989 		/* set page commands needed */
990 		ndw += ncmds * 10;
991 
992 		/* two extra commands for begin/end of fragment */
993 		ndw += 2 * 10;
994 
995 		params.func = amdgpu_vm_do_set_ptes;
996 	}
997 
998 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
999 	if (r)
1000 		return r;
1001 
1002 	params.ib = &job->ibs[0];
1003 
1004 	if (!src && pages_addr) {
1005 		uint64_t *pte;
1006 		unsigned i;
1007 
1008 		/* Put the PTEs at the end of the IB. */
1009 		i = ndw - nptes * 2;
1010 		pte = (uint64_t *)&(job->ibs->ptr[i]);
1011 		params.src = job->ibs->gpu_addr + i * 4;
1012 
1013 		for (i = 0; i < nptes; ++i) {
1014 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1015 						    AMDGPU_GPU_PAGE_SIZE);
1016 			pte[i] |= flags;
1017 		}
1018 		addr = 0;
1019 	}
1020 
1021 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
1022 	if (r)
1023 		goto error_free;
1024 
1025 	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
1026 			     owner);
1027 	if (r)
1028 		goto error_free;
1029 
1030 	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
1031 	if (r)
1032 		goto error_free;
1033 
1034 	params.shadow = true;
1035 	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
1036 	params.shadow = false;
1037 	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
1038 
1039 	amdgpu_ring_pad_ib(ring, params.ib);
1040 	WARN_ON(params.ib->length_dw > ndw);
1041 	r = amdgpu_job_submit(job, ring, &vm->entity,
1042 			      AMDGPU_FENCE_OWNER_VM, &f);
1043 	if (r)
1044 		goto error_free;
1045 
1046 	amdgpu_bo_fence(vm->page_directory, f, true);
1047 	if (fence) {
1048 		fence_put(*fence);
1049 		*fence = fence_get(f);
1050 	}
1051 	fence_put(f);
1052 	return 0;
1053 
1054 error_free:
1055 	amdgpu_job_free(job);
1056 	return r;
1057 }
1058 
1059 /**
1060  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1061  *
1062  * @adev: amdgpu_device pointer
1063  * @exclusive: fence we need to sync to
1064  * @gtt_flags: flags as they are used for GTT
1065  * @pages_addr: DMA addresses to use for mapping
1066  * @vm: requested vm
1067  * @mapping: mapped range and flags to use for the update
1068  * @addr: addr to set the area to
1069  * @flags: HW flags for the mapping
1070  * @fence: optional resulting fence
1071  *
1072  * Split the mapping into smaller chunks so that each update fits
1073  * into a SDMA IB.
1074  * Returns 0 for success, -EINVAL for failure.
1075  */
1076 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1077 				      struct fence *exclusive,
1078 				      uint32_t gtt_flags,
1079 				      dma_addr_t *pages_addr,
1080 				      struct amdgpu_vm *vm,
1081 				      struct amdgpu_bo_va_mapping *mapping,
1082 				      uint32_t flags, uint64_t addr,
1083 				      struct fence **fence)
1084 {
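	/* each chunk updates at most 64MB worth of GPU pages per SDMA IB */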
1085 	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
1086 
1087 	uint64_t src = 0, start = mapping->it.start;
1088 	int r;
1089 
1090 	/* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1091 	 * but just in case we filter the flags here first.
1092 	 */
1093 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1094 		flags &= ~AMDGPU_PTE_READABLE;
1095 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1096 		flags &= ~AMDGPU_PTE_WRITEABLE;
1097 
1098 	trace_amdgpu_vm_bo_update(mapping);
1099 
1100 	if (pages_addr) {
1101 		if (flags == gtt_flags)
1102 			src = adev->gart.table_addr + (addr >> 12) * 8;
1103 		addr = 0;
1104 	}
1105 	addr += mapping->offset;
1106 
1107 	if (!pages_addr || src)
1108 		return amdgpu_vm_bo_update_mapping(adev, exclusive,
1109 						   src, pages_addr, vm,
1110 						   start, mapping->it.last,
1111 						   flags, addr, fence);
1112 
1113 	while (start != mapping->it.last + 1) {
1114 		uint64_t last;
1115 
1116 		last = min((uint64_t)mapping->it.last, start + max_size - 1);
1117 		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1118 						src, pages_addr, vm,
1119 						start, last, flags, addr,
1120 						fence);
1121 		if (r)
1122 			return r;
1123 
1124 		start = last + 1;
1125 		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 /**
1132  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1133  *
1134  * @adev: amdgpu_device pointer
1135  * @bo_va: requested BO and VM object
1136  * @clear: if true clear the entries
1137  *
1138  * Fill in the page table entries for @bo_va.
1139  * Returns 0 for success, -EINVAL for failure.
1140  */
1141 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1142 			struct amdgpu_bo_va *bo_va,
1143 			bool clear)
1144 {
1145 	struct amdgpu_vm *vm = bo_va->vm;
1146 	struct amdgpu_bo_va_mapping *mapping;
1147 	dma_addr_t *pages_addr = NULL;
1148 	uint32_t gtt_flags, flags;
1149 	struct ttm_mem_reg *mem;
1150 	struct fence *exclusive;
1151 	uint64_t addr;
1152 	int r;
1153 
1154 	if (clear) {
1155 		mem = NULL;
1156 		addr = 0;
1157 		exclusive = NULL;
1158 	} else {
1159 		struct ttm_dma_tt *ttm;
1160 
1161 		mem = &bo_va->bo->tbo.mem;
1162 		addr = (u64)mem->start << PAGE_SHIFT;
1163 		switch (mem->mem_type) {
1164 		case TTM_PL_TT:
1165 			ttm = container_of(bo_va->bo->tbo.ttm, struct
1166 					   ttm_dma_tt, ttm);
1167 			pages_addr = ttm->dma_address;
1168 			break;
1169 
1170 		case TTM_PL_VRAM:
1171 			addr += adev->vm_manager.vram_base_offset;
1172 			break;
1173 
1174 		default:
1175 			break;
1176 		}
1177 
1178 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1179 	}
1180 
1181 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1182 	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1183 		adev == bo_va->bo->adev) ? flags : 0;
1184 
1185 	spin_lock(&vm->status_lock);
1186 	if (!list_empty(&bo_va->vm_status))
1187 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1188 	spin_unlock(&vm->status_lock);
1189 
1190 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1191 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1192 					       gtt_flags, pages_addr, vm,
1193 					       mapping, flags, addr,
1194 					       &bo_va->last_pt_update);
1195 		if (r)
1196 			return r;
1197 	}
1198 
1199 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1200 		list_for_each_entry(mapping, &bo_va->valids, list)
1201 			trace_amdgpu_vm_bo_mapping(mapping);
1202 
1203 		list_for_each_entry(mapping, &bo_va->invalids, list)
1204 			trace_amdgpu_vm_bo_mapping(mapping);
1205 	}
1206 
1207 	spin_lock(&vm->status_lock);
1208 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1209 	list_del_init(&bo_va->vm_status);
1210 	if (clear)
1211 		list_add(&bo_va->vm_status, &vm->cleared);
1212 	spin_unlock(&vm->status_lock);
1213 
1214 	return 0;
1215 }
1216 
1217 /**
1218  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1219  *
1220  * @adev: amdgpu_device pointer
1221  * @vm: requested vm
1222  *
1223  * Make sure all freed BOs are cleared in the PT.
1224  * Returns 0 for success.
1225  *
1226  * PTs have to be reserved and mutex must be locked!
1227  */
1228 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1229 			  struct amdgpu_vm *vm)
1230 {
1231 	struct amdgpu_bo_va_mapping *mapping;
1232 	int r;
1233 
1234 	while (!list_empty(&vm->freed)) {
1235 		mapping = list_first_entry(&vm->freed,
1236 			struct amdgpu_bo_va_mapping, list);
1237 		list_del(&mapping->list);
1238 
1239 		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1240 					       0, 0, NULL);
1241 		kfree(mapping);
1242 		if (r)
1243 			return r;
1244 
1245 	}
1246 	return 0;
1247 
1248 }
1249 
1250 /**
1251  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1252  *
1253  * @adev: amdgpu_device pointer
1254  * @vm: requested vm
1255  *
1256  * Make sure all invalidated BOs are cleared in the PT.
1257  * Returns 0 for success.
1258  *
1259  * PTs have to be reserved and mutex must be locked!
1260  */
1261 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1262 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1263 {
1264 	struct amdgpu_bo_va *bo_va = NULL;
1265 	int r = 0;
1266 
1267 	spin_lock(&vm->status_lock);
1268 	while (!list_empty(&vm->invalidated)) {
1269 		bo_va = list_first_entry(&vm->invalidated,
1270 			struct amdgpu_bo_va, vm_status);
1271 		spin_unlock(&vm->status_lock);
1272 
1273 		r = amdgpu_vm_bo_update(adev, bo_va, true);
1274 		if (r)
1275 			return r;
1276 
1277 		spin_lock(&vm->status_lock);
1278 	}
1279 	spin_unlock(&vm->status_lock);
1280 
1281 	if (bo_va)
1282 		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1283 
1284 	return r;
1285 }
1286 
1287 /**
1288  * amdgpu_vm_bo_add - add a bo to a specific vm
1289  *
1290  * @adev: amdgpu_device pointer
1291  * @vm: requested vm
1292  * @bo: amdgpu buffer object
1293  *
1294  * Add @bo into the requested vm.
1295  * Add @bo to the list of bos associated with the vm
1296  * Returns newly added bo_va or NULL for failure
1297  *
1298  * Object has to be reserved!
1299  */
1300 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1301 				      struct amdgpu_vm *vm,
1302 				      struct amdgpu_bo *bo)
1303 {
1304 	struct amdgpu_bo_va *bo_va;
1305 
1306 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1307 	if (bo_va == NULL) {
1308 		return NULL;
1309 	}
1310 	bo_va->vm = vm;
1311 	bo_va->bo = bo;
1312 	bo_va->ref_count = 1;
1313 	INIT_LIST_HEAD(&bo_va->bo_list);
1314 	INIT_LIST_HEAD(&bo_va->valids);
1315 	INIT_LIST_HEAD(&bo_va->invalids);
1316 	INIT_LIST_HEAD(&bo_va->vm_status);
1317 
1318 	list_add_tail(&bo_va->bo_list, &bo->va);
1319 
1320 	return bo_va;
1321 }
1322 
1323 /**
1324  * amdgpu_vm_bo_map - map bo inside a vm
1325  *
1326  * @adev: amdgpu_device pointer
1327  * @bo_va: bo_va to store the address
1328  * @saddr: where to map the BO
1329  * @offset: requested offset in the BO
1330  * @flags: attributes of pages (read/write/valid/etc.)
1331  *
1332  * Add a mapping of the BO at the specified addr into the VM.
1333  * Returns 0 for success, error for failure.
1334  *
1335  * Object has to be reserved and unreserved outside!
1336  */
1337 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1338 		     struct amdgpu_bo_va *bo_va,
1339 		     uint64_t saddr, uint64_t offset,
1340 		     uint64_t size, uint32_t flags)
1341 {
1342 	struct amdgpu_bo_va_mapping *mapping;
1343 	struct amdgpu_vm *vm = bo_va->vm;
1344 	struct interval_tree_node *it;
1345 	unsigned last_pfn, pt_idx;
1346 	uint64_t eaddr;
1347 	int r;
1348 
1349 	/* validate the parameters */
1350 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1351 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1352 		return -EINVAL;
1353 
1354 	/* make sure the object fits at this offset */
1355 	eaddr = saddr + size - 1;
1356 	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1357 		return -EINVAL;
1358 
1359 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1360 	if (last_pfn >= adev->vm_manager.max_pfn) {
1361 		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1362 			last_pfn, adev->vm_manager.max_pfn);
1363 		return -EINVAL;
1364 	}
1365 
1366 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1367 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1368 
1369 	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1370 	if (it) {
1371 		struct amdgpu_bo_va_mapping *tmp;
1372 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1373 		/* bo and tmp overlap, invalid addr */
1374 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1375 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1376 			tmp->it.start, tmp->it.last + 1);
1377 		r = -EINVAL;
1378 		goto error;
1379 	}
1380 
1381 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1382 	if (!mapping) {
1383 		r = -ENOMEM;
1384 		goto error;
1385 	}
1386 
1387 	INIT_LIST_HEAD(&mapping->list);
1388 	mapping->it.start = saddr;
1389 	mapping->it.last = eaddr;
1390 	mapping->offset = offset;
1391 	mapping->flags = flags;
1392 
1393 	list_add(&mapping->list, &bo_va->invalids);
1394 	interval_tree_insert(&mapping->it, &vm->va);
1395 
1396 	/* Make sure the page tables are allocated */
1397 	saddr >>= amdgpu_vm_block_size;
1398 	eaddr >>= amdgpu_vm_block_size;
1399 
1400 	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1401 
1402 	if (eaddr > vm->max_pde_used)
1403 		vm->max_pde_used = eaddr;
1404 
1405 	/* walk over the address space and allocate the page tables */
1406 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1407 		struct reservation_object *resv = vm->page_directory->tbo.resv;
1408 		struct amdgpu_bo_list_entry *entry;
1409 		struct amdgpu_bo *pt;
1410 
1411 		entry = &vm->page_tables[pt_idx].entry;
1412 		if (entry->robj)
1413 			continue;
1414 
1415 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1416 				     AMDGPU_GPU_PAGE_SIZE, true,
1417 				     AMDGPU_GEM_DOMAIN_VRAM,
1418 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1419 				     AMDGPU_GEM_CREATE_SHADOW,
1420 				     NULL, resv, &pt);
1421 		if (r)
1422 			goto error_free;
1423 
1424 		/* Keep a reference to the page directory so it is not
1425 		 * freed before its page tables.
1426 		 */
1427 		pt->parent = amdgpu_bo_ref(vm->page_directory);
1428 
1429 		r = amdgpu_vm_clear_bo(adev, vm, pt);
1430 		if (r) {
1431 			amdgpu_bo_unref(&pt->shadow);
1432 			amdgpu_bo_unref(&pt);
1433 			goto error_free;
1434 		}
1435 
1436 		if (pt->shadow) {
1437 			r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
1438 			if (r) {
1439 				amdgpu_bo_unref(&pt->shadow);
1440 				amdgpu_bo_unref(&pt);
1441 				goto error_free;
1442 			}
1443 		}
1444 
1445 		entry->robj = pt;
1446 		entry->priority = 0;
1447 		entry->tv.bo = &entry->robj->tbo;
1448 		entry->tv.shared = true;
1449 		entry->user_pages = NULL;
1450 		vm->page_tables[pt_idx].addr = 0;
1451 	}
1452 
1453 	return 0;
1454 
1455 error_free:
1456 	list_del(&mapping->list);
1457 	interval_tree_remove(&mapping->it, &vm->va);
1458 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1459 	kfree(mapping);
1460 
1461 error:
1462 	return r;
1463 }
1464 
1465 /**
1466  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1467  *
1468  * @adev: amdgpu_device pointer
1469  * @bo_va: bo_va to remove the address from
1470  * @saddr: where the BO is mapped
1471  *
1472  * Remove a mapping of the BO at the specified addr from the VM.
1473  * Returns 0 for success, error for failure.
1474  *
1475  * Object has to be reserved and unreserved outside!
1476  */
1477 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1478 		       struct amdgpu_bo_va *bo_va,
1479 		       uint64_t saddr)
1480 {
1481 	struct amdgpu_bo_va_mapping *mapping;
1482 	struct amdgpu_vm *vm = bo_va->vm;
1483 	bool valid = true;
1484 
1485 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1486 
1487 	list_for_each_entry(mapping, &bo_va->valids, list) {
1488 		if (mapping->it.start == saddr)
1489 			break;
1490 	}
1491 
1492 	if (&mapping->list == &bo_va->valids) {
1493 		valid = false;
1494 
1495 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1496 			if (mapping->it.start == saddr)
1497 				break;
1498 		}
1499 
1500 		if (&mapping->list == &bo_va->invalids)
1501 			return -ENOENT;
1502 	}
1503 
1504 	list_del(&mapping->list);
1505 	interval_tree_remove(&mapping->it, &vm->va);
1506 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1507 
1508 	if (valid)
1509 		list_add(&mapping->list, &vm->freed);
1510 	else
1511 		kfree(mapping);
1512 
1513 	return 0;
1514 }
1515 
1516 /**
1517  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1518  *
1519  * @adev: amdgpu_device pointer
1520  * @bo_va: requested bo_va
1521  *
1522  * Remove @bo_va->bo from the requested vm.
1523  *
1524  * Object has to be reserved!
1525  */
1526 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1527 		      struct amdgpu_bo_va *bo_va)
1528 {
1529 	struct amdgpu_bo_va_mapping *mapping, *next;
1530 	struct amdgpu_vm *vm = bo_va->vm;
1531 
1532 	list_del(&bo_va->bo_list);
1533 
1534 	spin_lock(&vm->status_lock);
1535 	list_del(&bo_va->vm_status);
1536 	spin_unlock(&vm->status_lock);
1537 
1538 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1539 		list_del(&mapping->list);
1540 		interval_tree_remove(&mapping->it, &vm->va);
1541 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1542 		list_add(&mapping->list, &vm->freed);
1543 	}
1544 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1545 		list_del(&mapping->list);
1546 		interval_tree_remove(&mapping->it, &vm->va);
1547 		kfree(mapping);
1548 	}
1549 
1550 	fence_put(bo_va->last_pt_update);
1551 	kfree(bo_va);
1552 }
1553 
1554 /**
1555  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1556  *
1557  * @adev: amdgpu_device pointer
1559  * @bo: amdgpu buffer object
1560  *
1561  * Mark @bo as invalid.
1562  */
1563 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1564 			     struct amdgpu_bo *bo)
1565 {
1566 	struct amdgpu_bo_va *bo_va;
1567 
1568 	list_for_each_entry(bo_va, &bo->va, bo_list) {
1569 		spin_lock(&bo_va->vm->status_lock);
1570 		if (list_empty(&bo_va->vm_status))
1571 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1572 		spin_unlock(&bo_va->vm->status_lock);
1573 	}
1574 }
1575 
1576 /**
1577  * amdgpu_vm_init - initialize a vm instance
1578  *
1579  * @adev: amdgpu_device pointer
1580  * @vm: requested vm
1581  *
1582  * Init @vm fields.
1583  */
1584 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1585 {
1586 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1587 		AMDGPU_VM_PTE_COUNT * 8);
1588 	unsigned pd_size, pd_entries;
1589 	unsigned ring_instance;
1590 	struct amdgpu_ring *ring;
1591 	struct amd_sched_rq *rq;
1592 	int i, r;
1593 
1594 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1595 		vm->ids[i] = NULL;
1596 	vm->va = RB_ROOT;
1597 	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1598 	spin_lock_init(&vm->status_lock);
1599 	INIT_LIST_HEAD(&vm->invalidated);
1600 	INIT_LIST_HEAD(&vm->cleared);
1601 	INIT_LIST_HEAD(&vm->freed);
1602 
1603 	pd_size = amdgpu_vm_directory_size(adev);
1604 	pd_entries = amdgpu_vm_num_pdes(adev);
1605 
1606 	/* allocate page table array */
1607 	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1608 	if (vm->page_tables == NULL) {
1609 		DRM_ERROR("Cannot allocate memory for page table array\n");
1610 		return -ENOMEM;
1611 	}
1612 
1613 	/* create scheduler entity for page table updates */
1614 
1615 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1616 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
1617 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
1618 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1619 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
1620 				  rq, amdgpu_sched_jobs);
1621 	if (r)
1622 		goto err;
1623 
1624 	vm->page_directory_fence = NULL;
1625 
1626 	r = amdgpu_bo_create(adev, pd_size, align, true,
1627 			     AMDGPU_GEM_DOMAIN_VRAM,
1628 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1629 			     AMDGPU_GEM_CREATE_SHADOW,
1630 			     NULL, NULL, &vm->page_directory);
1631 	if (r)
1632 		goto error_free_sched_entity;
1633 
1634 	r = amdgpu_bo_reserve(vm->page_directory, false);
1635 	if (r)
1636 		goto error_free_page_directory;
1637 
1638 	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1639 	if (r)
1640 		goto error_unreserve;
1641 
1642 	if (vm->page_directory->shadow) {
1643 		r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
1644 		if (r)
1645 			goto error_unreserve;
1646 	}
1647 
1648 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1649 	amdgpu_bo_unreserve(vm->page_directory);
1650 
1651 	return 0;
1652 
1653 error_unreserve:
1654 	amdgpu_bo_unreserve(vm->page_directory);
1655 
1656 error_free_page_directory:
1657 	amdgpu_bo_unref(&vm->page_directory->shadow);
1658 	amdgpu_bo_unref(&vm->page_directory);
1659 	vm->page_directory = NULL;
1660 
1661 error_free_sched_entity:
1662 	amd_sched_entity_fini(&ring->sched, &vm->entity);
1663 
1664 err:
1665 	drm_free_large(vm->page_tables);
1666 
1667 	return r;
1668 }
1669 
1670 /**
1671  * amdgpu_vm_fini - tear down a vm instance
1672  *
1673  * @adev: amdgpu_device pointer
1674  * @vm: requested vm
1675  *
1676  * Tear down @vm.
1677  * Unbind the VM and remove all bos from the vm bo list
1678  */
1679 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1680 {
1681 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1682 	int i;
1683 
1684 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1685 
1686 	if (!RB_EMPTY_ROOT(&vm->va)) {
1687 		dev_err(adev->dev, "still active bo inside vm\n");
1688 	}
1689 	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1690 		list_del(&mapping->list);
1691 		interval_tree_remove(&mapping->it, &vm->va);
1692 		kfree(mapping);
1693 	}
1694 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1695 		list_del(&mapping->list);
1696 		kfree(mapping);
1697 	}
1698 
1699 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
1700 		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
1701 
1702 		if (!pt)
1703 			continue;
1704 
1705 		amdgpu_bo_unref(&pt->shadow);
1706 		amdgpu_bo_unref(&pt);
1707 	}
1708 	drm_free_large(vm->page_tables);
1709 
1710 	amdgpu_bo_unref(&vm->page_directory->shadow);
1711 	amdgpu_bo_unref(&vm->page_directory);
1712 	fence_put(vm->page_directory_fence);
1713 }
1714 
1715 /**
1716  * amdgpu_vm_manager_init - init the VM manager
1717  *
1718  * @adev: amdgpu_device pointer
1719  *
1720  * Initialize the VM manager structures
1721  */
1722 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1723 {
1724 	unsigned i;
1725 
1726 	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1727 
1728 	/* skip over VMID 0, since it is the system VM */
1729 	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1730 		amdgpu_vm_reset_id(adev, i);
1731 		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1732 		list_add_tail(&adev->vm_manager.ids[i].list,
1733 			      &adev->vm_manager.ids_lru);
1734 	}
1735 
1736 	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1737 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1738 		adev->vm_manager.seqno[i] = 0;
1739 
1740 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1741 	atomic64_set(&adev->vm_manager.client_counter, 0);
1742 }
1743 
1744 /**
1745  * amdgpu_vm_manager_fini - cleanup VM manager
1746  *
1747  * @adev: amdgpu_device pointer
1748  *
1749  * Cleanup the VM manager and free resources.
1750  */
1751 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1752 {
1753 	unsigned i;
1754 
1755 	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1756 		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1757 
1758 		fence_put(adev->vm_manager.ids[i].first);
1759 		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1760 		fence_put(id->flushed_updates);
1761 		fence_put(id->last_flush);
1762 	}
1763 }
1764