// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>

#include "uverbs.h"

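/**
 * ib_umem_dmabuf_map_pages - map the underlying pages for DMA
 * @umem_dmabuf: umem backed by a dma-buf
 *
 * Map the dma-buf attachment, clip the resulting sg list to the page-aligned
 * umem boundaries, and wait on the exporter's exclusive fence so the pages
 * are up to date before the device touches them.
 *
 * Context: the caller must hold the dma-buf reservation lock.
 * Return: 0 on success or a negative errno.
 */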
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct dma_fence *fence;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			/*
			 * Clip the front of the first entry that overlaps
			 * the umem, remembering the offset so it can be
			 * restored on unmap.
			 */
			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			/*
			 * Likewise trim the tail of the last overlapping
			 * entry and stop scanning.
			 */
			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up to date. Wait for the exporter to finish
	 * the migration.
	 */
	fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
	if (fence)
		return dma_fence_wait(fence, false);

	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

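/**
 * ib_umem_dmabuf_unmap_pages - unmap the underlying pages
 * @umem_dmabuf: umem backed by a dma-buf
 *
 * Undo the clipping applied by ib_umem_dmabuf_map_pages(), unmap the dma-buf
 * attachment, and forget the sg table. A no-op if the pages are not mapped.
 *
 * Context: the caller must hold the dma-buf reservation lock.
 */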
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

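/**
 * ib_umem_dmabuf_get - create an ib_umem backed by a dma-buf region
 * @device: IB device the umem will be used with
 * @offset: start of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB access flags for the umem
 * @ops: importer callbacks; move_notify is mandatory
 *
 * Take a reference on the dma-buf and create a dynamic attachment for it.
 * The pages are not mapped here; ib_umem_dmabuf_map_pages() does that later,
 * under the reservation lock.
 *
 * Return: the new umem on success or an ERR_PTR on failure.
 */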
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

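/**
 * ib_umem_dmabuf_release - free a dma-buf umem
 * @umem_dmabuf: umem to be released
 *
 * Unmap any pages still mapped under the reservation lock, detach from the
 * dma-buf, drop the dma-buf reference, and free the umem.
 */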
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}

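/*
 * Example usage (an illustrative sketch, not part of this file): a
 * hypothetical importer wiring these helpers to a dynamic dma-buf
 * attachment. The names my_invalidate_cb, my_attach_ops, and
 * my_reg_dmabuf_mr are assumptions for illustration only; the
 * driver-specific invalidation of hardware translations is elided.
 */
#if 0
static void my_invalidate_cb(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	/* The exporter calls move_notify with the reservation lock held. */
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	/* ... stop device access to the old pages (driver specific) ... */

	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = my_invalidate_cb,
};

static struct ib_umem_dmabuf *my_reg_dmabuf_mr(struct ib_device *dev,
					       unsigned long offset,
					       size_t size, int fd, int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get(dev, offset, size, fd, access,
					 &my_attach_ops);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);

	/* The initial mapping must also be done under the reservation lock. */
	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	if (err) {
		ib_umem_dmabuf_release(umem_dmabuf);
		return ERR_PTR(err);
	}
	return umem_dmabuf;
}
#endif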