/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects, which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

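/*
 * Illustrative sketch (an assumption for documentation, not driver code):
 * the typical lifecycle of a kernel-internal allocation built on the
 * interfaces below. Error handling is elided and the "scratch_*" names
 * are hypothetical.
 *
 *	struct amdgpu_bo *scratch_bo = NULL;
 *	u64 scratch_gpu_addr;
 *	void *scratch_cpu_ptr;
 *	int r;
 *
 *	// Allocate, pin and CPU-map one page of GTT in a single call.
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &scratch_bo,
 *				    &scratch_gpu_addr, &scratch_cpu_ptr);
 *
 *	// ... use scratch_cpu_ptr and scratch_gpu_addr ...
 *
 *	// Unmap, unpin and drop the reference again.
 *	amdgpu_bo_free_kernel(&scratch_bo, &scratch_gpu_addr,
 *			      &scratch_cpu_ptr);
 */
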
/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

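/*
 * Illustrative sketch (an assumption, not code from this file): TTM hands
 * its callbacks bare &ttm_buffer_object pointers, so this check is the
 * usual guard before downcasting:
 *
 *	if (!amdgpu_bo_is_amdgpu_bo(tbo))
 *		return;	// not ours, e.g. a ghost object
 *	abo = ttm_to_amdgpu_bo(tbo);
 */
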
/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_TT;
		places[c].flags = 0;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = TTM_PL_FLAG_UNCACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = TTM_PL_FLAG_UNCACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = TTM_PL_FLAG_UNCACHED;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = TTM_PL_MASK_CACHING;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

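/*
 * Illustrative sketch (an assumption, not code from this file): this helper
 * is typically paired with ttm_bo_validate() to move an already reserved BO,
 * here allowing it to fall back from VRAM to GTT:
 *
 *	struct ttm_operation_ctx ctx = { true, false };
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */
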
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

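/*
 * Illustrative sketch (an assumption, not code from this file): because the
 * BO comes back reserved, the caller can safely initialize it before
 * publishing it, then drop the reservation:
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				      &gpu_addr, &cpu_ptr);
 *	if (!r) {
 *		memset(cpu_ptr, 0, size);	// init while still reserved
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
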
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size fits within the total memory of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO: add more domain checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		.resv = bp->resv,
		.flags = bp->type != ttm_bo_type_kernel ?
			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}

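/*
 * Illustrative sketch (an assumption, not code from this file): callers
 * describe the allocation through &amdgpu_bo_param; unset fields keep
 * their zeroed defaults:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo = NULL;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
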
/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow bos.  It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

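/*
 * Illustrative sketch (an assumption, not code from this file): the
 * kmap/kunmap pair brackets CPU access to a reserved, CPU-visible BO;
 * "data" and "len" are hypothetical:
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);	// CPU writes through the mapping
 *		amdgpu_bo_kunmap(bo);
 *	}
 */
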
/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

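/*
 * Illustrative sketch (an assumption, not code from this file): the
 * ref/unref pair expresses ownership; unref takes a double pointer so the
 * caller's reference is also NULLed:
 *
 *	struct amdgpu_bo *extra = amdgpu_bo_ref(bo);	// take a reference
 *	// ... hand "extra" to another subsystem ...
 *	amdgpu_bo_unref(&extra);		// drop it; extra is now NULL
 */
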
/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

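/*
 * Illustrative sketch (an assumption, not code from this file): restricting
 * the pin range is how a BO can be forced into CPU-visible VRAM; the BO
 * must be reserved around the call:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
 *					     0, adev->gmc.visible_vram_size);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
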
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					      adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_bo_mmap_obj() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	return ttm_bo_mmap_obj(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

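/*
 * Illustrative sketch (an assumption, not code from this file): userspace
 * drivers typically round-trip an opaque tiling/format blob through this
 * pair; "blob" and "blob_size" are hypothetical:
 *
 *	uint32_t out_size;
 *	uint64_t out_flags;
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, blob_size, 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, blob, blob_size,
 *				   &out_size, &out_flags);
 */
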
/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->mem.mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	/* We only remove the fence if the resv has been individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

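/*
 * Illustrative sketch (an assumption, not code from this file): a kernel
 * client can block until the fences on a BO signal before touching its
 * memory; with AMDGPU_SYNC_NE_OWNER this waits on fences from owners
 * other than the one passed in:
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 *	if (r)
 *		return r;	// interrupted or wait failed
 */
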
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}