// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Devmem TCP
 *
 *	Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

/* Protected by rtnl_lock() */
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

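/* gen_pool_for_each_chunk() callback: free the net_iov array and the chunk
 * owner itself when a binding's genpool is torn down.
 */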
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->niovs);
	kfree(owner);
}

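/* A net_iov covers one PAGE_SIZE slice of its chunk; its DMA address is the
 * chunk's base DMA address plus the niov's page-sized offset within it.
 */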
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

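/* Called once the last reference to the binding is dropped: free all chunk
 * owners, destroy the genpool (warning if allocations are still outstanding),
 * then unmap, detach and release the underlying dma-buf.
 */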
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

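/* Allocate one PAGE_SIZE chunk of device memory from the binding's genpool
 * and return the net_iov describing it, or NULL if the pool is exhausted.
 */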
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

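/* Return a net_iov's backing memory to the genpool it was allocated from. */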
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

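/* Undo a binding: unlink it if it is on a list, clear the memory provider
 * from every bound rx queue and restart those queues, then remove the
 * binding from the global xarray and drop the reference taken at bind time.
 */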
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;
	int err;

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		WARN_ON(rxq->mp_params.mp_priv != binding);

		rxq->mp_params.mp_priv = NULL;

		rxq_idx = get_netdev_rx_queue_index(rxq);

		err = netdev_rx_queue_restart(binding->dev, rxq_idx);
		WARN_ON(err && err != -ENETDOWN);
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

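/* Attach a binding to one rx queue: reject out-of-range indices and queues
 * already claimed by a memory provider or by AF_XDP, record the queue in the
 * binding's bound_rxqs xarray, set the binding as the queue's memory
 * provider and restart the queue so the new provider takes effect.
 */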
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_priv) {
		NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
		return -EEXIST;
	}

#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	rxq->mp_params.mp_priv = binding;

	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err)
		goto err_xa_erase;

	return 0;

err_xa_erase:
	rxq->mp_params.mp_priv = NULL;
	xa_erase(&binding->bound_rxqs, xa_idx);

	return err;
}

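/* Create a binding from a dma-buf fd: attach the dma-buf to the device and
 * map it for DMA, then carve the resulting scatterlist into a genpool of
 * PAGE_SIZE chunks, allocating a net_iov per page so each one can later be
 * handed to the page pool as device memory.
 */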
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->niovs = kvmalloc_array(owner->num_niovs,
					      sizeof(*owner->niovs),
					      GFP_KERNEL);
		if (!owner->niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->num_niovs; i++) {
			niov = &owner->niovs[i];
			niov->owner = owner;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

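/* Detach all of @dev's rx queues from their dma-buf bindings: erase each
 * queue from its binding's bound_rxqs xarray, without restarting the queue.
 */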
void dev_dmabuf_uninstall(struct net_device *dev)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		binding = dev->_rx[i].mp_params.mp_priv;
		if (!binding)
			continue;

		xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
			if (rxq == &dev->_rx[i]) {
				xa_erase(&binding->bound_rxqs, xa_idx);
				break;
			}
	}
}

/*** "Dmabuf devmem memory provider" ***/

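/* Page pool memory provider hooks. Init checks that the pool expects the
 * provider to handle DMA mapping, does not request DMA syncing and uses
 * order-0 pages only, then takes a reference on the binding for the pool's
 * lifetime.
 */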
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	if (!pool->dma_map)
		return -EOPNOTSUPP;

	if (pool->dma_sync)
		return -EOPNOTSUPP;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

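/* Allocation hook: hand out one net_iov from the binding as a netmem_ref,
 * stamp it with the pool's pp info and account it against the pool's hold
 * count, mirroring what the page pool does for ordinary pages.
 */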
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

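/* Destroy hook: drop the reference on the binding taken at pool init. */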
void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

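/* Release hook: expect an otherwise unreferenced net_iov (pp_ref_count == 1),
 * clear its pp info and return the chunk to the binding's genpool. Always
 * return false so the page pool never treats the net_iov as a real page.
 */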
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}