/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

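/**
 * amdgpu_gem_prime_get_sg_table - build an sg_table for PRIME export
 * @obj: GEM buffer object to export
 *
 * Creates a scatter/gather table from the pages backing the buffer
 * object's TTM memory so an importer can map them via dma-buf.
 *
 * Returns:
 * The sg_table, or an ERR_PTR on failure.
 */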
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

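/**
 * amdgpu_gem_prime_vmap - map an exported buffer into kernel address space
 * @obj: GEM buffer object
 *
 * Sets up a ttm_bo_kmap() mapping of the whole buffer for dma-buf vmap.
 *
 * Returns:
 * The kernel virtual address of the mapping, or an ERR_PTR on failure.
 */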
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

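/**
 * amdgpu_gem_prime_vunmap - tear down the kernel mapping again
 * @obj: GEM buffer object
 * @vaddr: virtual address returned by amdgpu_gem_prime_vmap() (unused)
 */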
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

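/**
 * amdgpu_gem_prime_import_sg_table - import a dma-buf's pages as a GEM object
 * @dev: DRM device
 * @attach: dma-buf attachment for the buffer being imported
 * @sg: scatter/gather table of the imported pages
 *
 * Creates a GTT-domain buffer object backed by @sg, sharing the exporter's
 * reservation object. The reservation lock is held across the creation so
 * the new bo cannot race with other users of the dma-buf.
 *
 * Returns:
 * The new GEM object, or an ERR_PTR on failure.
 */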
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&bo->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}

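/**
 * amdgpu_gem_prime_pin - pin an exported buffer into GTT
 * @obj: GEM buffer object
 *
 * Waits for all shared fences on the buffer, then pins it into the GTT
 * domain so an importer can rely on a stable DMA address. Bumps
 * prime_shared_count on success.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */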
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Wait for all shared fences to complete before we switch to future
	 * use of an exclusive fence on this prime shared bo.
	 */
	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
		amdgpu_bo_unreserve(bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return ret;
}

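/**
 * amdgpu_gem_prime_unpin - unpin a buffer pinned by amdgpu_gem_prime_pin()
 * @obj: GEM buffer object
 *
 * Drops the pin and decrements prime_shared_count.
 */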
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return;

	amdgpu_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);
}

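/**
 * amdgpu_gem_prime_res_obj - look up the buffer's reservation object
 * @obj: GEM buffer object
 *
 * Returns:
 * The reservation object of the underlying TTM buffer object.
 */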
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

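/**
 * amdgpu_gem_prime_export - export a GEM object as a dma-buf
 * @dev: DRM device
 * @gobj: GEM buffer object to export
 * @flags: dma-buf flags, e.g. O_CLOEXEC
 *
 * Userptr-backed buffers are rejected, since their pages belong to a user
 * address space and cannot safely be shared with another device; everything
 * else is handed to the generic drm_gem_prime_export() helper.
 */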
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}