Lines matching refs: umem
26 void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) in xdp_add_sk_umem() argument
33 spin_lock_irqsave(&umem->xsk_list_lock, flags); in xdp_add_sk_umem()
34 list_add_rcu(&xs->list, &umem->xsk_list); in xdp_add_sk_umem()
35 spin_unlock_irqrestore(&umem->xsk_list_lock, flags); in xdp_add_sk_umem()
38 void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) in xdp_del_sk_umem() argument
45 spin_lock_irqsave(&umem->xsk_list_lock, flags); in xdp_del_sk_umem()
47 spin_unlock_irqrestore(&umem->xsk_list_lock, flags); in xdp_del_sk_umem()
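The two fragments above are the helpers that add and remove a socket on a umem's xsk_list. A minimal sketch of how they likely read in full; the flags local, the list_del_rcu() call and any early-return checks are assumptions filled in around the listed lines:

	void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
	{
		unsigned long flags;	/* assumed local, needed by irqsave */

		spin_lock_irqsave(&umem->xsk_list_lock, flags);
		list_add_rcu(&xs->list, &umem->xsk_list);
		spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
	}

	void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
	{
		unsigned long flags;

		spin_lock_irqsave(&umem->xsk_list_lock, flags);
		list_del_rcu(&xs->list);	/* assumed: this line is not part of the ref listing */
		spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
	}

Readers walk xsk_list under RCU, which is why the writers use the RCU list primitives inside the irq-safe spinlock.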
54 static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, in xdp_reg_umem_at_qid() argument
63 dev->_rx[queue_id].umem = umem; in xdp_reg_umem_at_qid()
65 dev->_tx[queue_id].umem = umem; in xdp_reg_umem_at_qid()
74 return dev->_rx[queue_id].umem; in xdp_get_umem_from_qid()
76 return dev->_tx[queue_id].umem; in xdp_get_umem_from_qid()
85 dev->_rx[queue_id].umem = NULL; in xdp_clear_umem_at_qid()
87 dev->_tx[queue_id].umem = NULL; in xdp_clear_umem_at_qid()
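Lines 54-87 of the file are the per-queue helpers that mirror the umem pointer into both the Rx and Tx netdev queue structs. A sketch under the assumption that each assignment is guarded by the corresponding real_num_rx_queues / real_num_tx_queues bound; those checks are not among the listed lines:

	static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
				       u16 queue_id)
	{
		/* Assumed bound check: reject queue ids beyond both rings. */
		if (queue_id >= max_t(unsigned int,
				      dev->real_num_rx_queues,
				      dev->real_num_tx_queues))
			return -EINVAL;

		if (queue_id < dev->real_num_rx_queues)
			dev->_rx[queue_id].umem = umem;
		if (queue_id < dev->real_num_tx_queues)
			dev->_tx[queue_id].umem = umem;

		return 0;
	}

	struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id)
	{
		if (queue_id < dev->real_num_rx_queues)
			return dev->_rx[queue_id].umem;
		if (queue_id < dev->real_num_tx_queues)
			return dev->_tx[queue_id].umem;

		return NULL;
	}

	static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
	{
		if (queue_id < dev->real_num_rx_queues)
			dev->_rx[queue_id].umem = NULL;
		if (queue_id < dev->real_num_tx_queues)
			dev->_tx[queue_id].umem = NULL;
	}

Storing the pointer on both rings covers devices whose Rx and Tx queue counts differ.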
90 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, in xdp_umem_assign_dev() argument
108 err = xdp_reg_umem_at_qid(dev, umem, queue_id); in xdp_umem_assign_dev()
112 umem->dev = dev; in xdp_umem_assign_dev()
113 umem->queue_id = queue_id; in xdp_umem_assign_dev()
116 umem->flags |= XDP_UMEM_USES_NEED_WAKEUP; in xdp_umem_assign_dev()
121 xsk_set_tx_need_wakeup(umem); in xdp_umem_assign_dev()
136 bpf.xsk.umem = umem; in xdp_umem_assign_dev()
143 umem->zc = true; in xdp_umem_assign_dev()
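xdp_umem_assign_dev() (lines 90-143) ties the umem to one device queue and, when the driver supports it, switches that queue to zero-copy via ndo_bpf. A condensed sketch of the happy path; flag validation, the -EBUSY check, the copy-mode fallback and the exact error unwinding are simplified assumptions, and only the lines listed above are verbatim:

	int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
				u16 queue_id, u16 flags)
	{
		struct netdev_bpf bpf;
		int err;

		err = xdp_reg_umem_at_qid(dev, umem, queue_id);
		if (err)
			return err;

		umem->dev = dev;
		umem->queue_id = queue_id;

		if (flags & XDP_USE_NEED_WAKEUP) {
			umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
			/* Tx has to be woken explicitly at least once. */
			xsk_set_tx_need_wakeup(umem);
		}

		dev_hold(dev);	/* assumed: released again in xdp_umem_clear_dev() */

		/* Ask the driver to run this queue in zero-copy mode. */
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = umem;
		bpf.xsk.queue_id = queue_id;

		err = dev->netdev_ops->ndo_bpf(dev, &bpf);
		if (err) {
			/* Simplified: the real path can also fall back to copy mode. */
			xdp_clear_umem_at_qid(dev, queue_id);
			return err;
		}

		umem->zc = true;
		return 0;
	}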
154 void xdp_umem_clear_dev(struct xdp_umem *umem) in xdp_umem_clear_dev() argument
161 if (!umem->dev) in xdp_umem_clear_dev()
164 if (umem->zc) { in xdp_umem_clear_dev()
166 bpf.xsk.umem = NULL; in xdp_umem_clear_dev()
167 bpf.xsk.queue_id = umem->queue_id; in xdp_umem_clear_dev()
169 err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf); in xdp_umem_clear_dev()
175 xdp_clear_umem_at_qid(umem->dev, umem->queue_id); in xdp_umem_clear_dev()
177 dev_put(umem->dev); in xdp_umem_clear_dev()
178 umem->dev = NULL; in xdp_umem_clear_dev()
179 umem->zc = false; in xdp_umem_clear_dev()
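The clear path undoes the assignment: the driver is told to drop the umem for the queue, the per-queue pointer is cleared, and the device reference taken in the assign path is released. A sketch, with the netdev_bpf local and the early !umem->dev return reconstructed as assumptions:

	void xdp_umem_clear_dev(struct xdp_umem *umem)
	{
		struct netdev_bpf bpf;
		int err;

		if (!umem->dev)
			return;		/* never bound to a device/queue */

		if (umem->zc) {
			/* Tell the driver to tear down its zero-copy state. */
			bpf.command = XDP_SETUP_XSK_UMEM;	/* command value assumed */
			bpf.xsk.umem = NULL;
			bpf.xsk.queue_id = umem->queue_id;

			err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
			WARN_ON(err);	/* assumed: failure here can only be warned about */
		}

		xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

		dev_put(umem->dev);
		umem->dev = NULL;
		umem->zc = false;
	}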
182 static void xdp_umem_unmap_pages(struct xdp_umem *umem) in xdp_umem_unmap_pages() argument
186 for (i = 0; i < umem->npgs; i++) in xdp_umem_unmap_pages()
187 if (PageHighMem(umem->pgs[i])) in xdp_umem_unmap_pages()
188 vunmap(umem->pages[i].addr); in xdp_umem_unmap_pages()
191 static int xdp_umem_map_pages(struct xdp_umem *umem) in xdp_umem_map_pages() argument
196 for (i = 0; i < umem->npgs; i++) { in xdp_umem_map_pages()
197 if (PageHighMem(umem->pgs[i])) in xdp_umem_map_pages()
198 addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL); in xdp_umem_map_pages()
200 addr = page_address(umem->pgs[i]); in xdp_umem_map_pages()
203 xdp_umem_unmap_pages(umem); in xdp_umem_map_pages()
207 umem->pages[i].addr = addr; in xdp_umem_map_pages()
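The map/unmap pair gives every pinned page a kernel virtual address in umem->pages[i].addr. A sketch; the lowmem else-branch, the NULL check and the return values are reconstructed assumptions around the listed lines:

	static void xdp_umem_unmap_pages(struct xdp_umem *umem)
	{
		unsigned int i;

		for (i = 0; i < umem->npgs; i++)
			if (PageHighMem(umem->pgs[i]))
				vunmap(umem->pages[i].addr);
	}

	static int xdp_umem_map_pages(struct xdp_umem *umem)
	{
		unsigned int i;
		void *addr;

		for (i = 0; i < umem->npgs; i++) {
			/* Highmem pages need an explicit kernel mapping;
			 * lowmem pages already have a linear-map address.
			 */
			if (PageHighMem(umem->pgs[i]))
				addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
			else
				addr = page_address(umem->pgs[i]);

			if (!addr) {
				xdp_umem_unmap_pages(umem);
				return -ENOMEM;
			}

			umem->pages[i].addr = addr;
		}

		return 0;
	}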
213 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
215 put_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
217 kfree(umem->pgs); in xdp_umem_unpin_pages()
218 umem->pgs = NULL; in xdp_umem_unpin_pages()
221 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
223 if (umem->user) { in xdp_umem_unaccount_pages()
224 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
225 free_uid(umem->user); in xdp_umem_unaccount_pages()
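Teardown of the pinned memory comes in two halves: dropping the long-term page pins (marking the pages dirty, since the device may have written into them) and returning the pages to the owning user's locked-memory accounting. The bodies below are essentially the listed lines with their surrounding braces restored:

	static void xdp_umem_unpin_pages(struct xdp_umem *umem)
	{
		put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

		kfree(umem->pgs);
		umem->pgs = NULL;
	}

	static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
	{
		if (umem->user) {
			/* Give the pages back to the user's locked-memory budget. */
			atomic_long_sub(umem->npgs, &umem->user->locked_vm);
			free_uid(umem->user);
		}
	}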
229 static void xdp_umem_release(struct xdp_umem *umem) in xdp_umem_release() argument
232 xdp_umem_clear_dev(umem); in xdp_umem_release()
235 ida_simple_remove(&umem_ida, umem->id); in xdp_umem_release()
237 if (umem->fq) { in xdp_umem_release()
238 xskq_destroy(umem->fq); in xdp_umem_release()
239 umem->fq = NULL; in xdp_umem_release()
242 if (umem->cq) { in xdp_umem_release()
243 xskq_destroy(umem->cq); in xdp_umem_release()
244 umem->cq = NULL; in xdp_umem_release()
247 xsk_reuseq_destroy(umem); in xdp_umem_release()
249 xdp_umem_unmap_pages(umem); in xdp_umem_release()
250 xdp_umem_unpin_pages(umem); in xdp_umem_release()
252 kfree(umem->pages); in xdp_umem_release()
253 umem->pages = NULL; in xdp_umem_release()
255 xdp_umem_unaccount_pages(umem); in xdp_umem_release()
256 kfree(umem); in xdp_umem_release()
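xdp_umem_release() is the single teardown path, run once the last reference is gone: detach from the device, drop the ID, destroy the fill and completion queues and the reuse queue, then unmap, unpin, unaccount and free. A sketch assembled from the listed lines; any locking taken around xdp_umem_clear_dev() is not visible in the listing and is omitted here:

	static void xdp_umem_release(struct xdp_umem *umem)
	{
		xdp_umem_clear_dev(umem);

		ida_simple_remove(&umem_ida, umem->id);

		if (umem->fq) {
			xskq_destroy(umem->fq);
			umem->fq = NULL;
		}

		if (umem->cq) {
			xskq_destroy(umem->cq);
			umem->cq = NULL;
		}

		xsk_reuseq_destroy(umem);

		xdp_umem_unmap_pages(umem);
		xdp_umem_unpin_pages(umem);

		kfree(umem->pages);
		umem->pages = NULL;

		xdp_umem_unaccount_pages(umem);
		kfree(umem);
	}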
261 struct xdp_umem *umem = container_of(work, struct xdp_umem, work); in xdp_umem_release_deferred() local
263 xdp_umem_release(umem); in xdp_umem_release_deferred()
266 void xdp_get_umem(struct xdp_umem *umem) in xdp_get_umem() argument
268 refcount_inc(&umem->users); in xdp_get_umem()
271 void xdp_put_umem(struct xdp_umem *umem) in xdp_put_umem() argument
273 if (!umem) in xdp_put_umem()
276 if (refcount_dec_and_test(&umem->users)) { in xdp_put_umem()
277 INIT_WORK(&umem->work, xdp_umem_release_deferred); in xdp_put_umem()
278 schedule_work(&umem->work); in xdp_put_umem()
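Lifetime is reference-counted, and the final put defers the release to a workqueue because teardown may sleep (page unpinning, driver callbacks). The three functions below are almost fully determined by the listed lines, with only the trivial early return reconstructed:

	static void xdp_umem_release_deferred(struct work_struct *work)
	{
		struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

		xdp_umem_release(umem);
	}

	void xdp_get_umem(struct xdp_umem *umem)
	{
		refcount_inc(&umem->users);
	}

	void xdp_put_umem(struct xdp_umem *umem)
	{
		if (!umem)
			return;

		if (refcount_dec_and_test(&umem->users)) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		}
	}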
282 static int xdp_umem_pin_pages(struct xdp_umem *umem) in xdp_umem_pin_pages() argument
288 umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), in xdp_umem_pin_pages()
290 if (!umem->pgs) in xdp_umem_pin_pages()
294 npgs = get_user_pages(umem->address, umem->npgs, in xdp_umem_pin_pages()
295 gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL); in xdp_umem_pin_pages()
298 if (npgs != umem->npgs) { in xdp_umem_pin_pages()
300 umem->npgs = npgs; in xdp_umem_pin_pages()
310 xdp_umem_unpin_pages(umem); in xdp_umem_pin_pages()
312 kfree(umem->pgs); in xdp_umem_pin_pages()
313 umem->pgs = NULL; in xdp_umem_pin_pages()
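Pinning allocates the pgs array and takes a long-term pin on the user's frame memory with get_user_pages(). In the sketch below the gup_flags value, the mmap_sem locking and the partial-pin error handling are assumptions; the allocation, the get_user_pages() call and the unwind lines are as listed:

	static int xdp_umem_pin_pages(struct xdp_umem *umem)
	{
		unsigned int gup_flags = FOLL_WRITE;	/* assumed */
		long npgs;
		int err;

		umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
				    GFP_KERNEL | __GFP_NOWARN);
		if (!umem->pgs)
			return -ENOMEM;

		down_read(&current->mm->mmap_sem);	/* assumed locking around gup */
		npgs = get_user_pages(umem->address, umem->npgs,
				      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
		up_read(&current->mm->mmap_sem);

		if (npgs != umem->npgs) {
			if (npgs >= 0) {
				/* Partial pin: record how many we hold, then undo. */
				umem->npgs = npgs;
				err = -ENOMEM;
				goto out_pin;
			}
			err = npgs;
			goto out_pgs;
		}
		return 0;

	out_pin:
		xdp_umem_unpin_pages(umem);
	out_pgs:
		kfree(umem->pgs);
		umem->pgs = NULL;
		return err;
	}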
317 static int xdp_umem_account_pages(struct xdp_umem *umem) in xdp_umem_account_pages() argument
325 umem->user = get_uid(current_user()); in xdp_umem_account_pages()
328 old_npgs = atomic_long_read(&umem->user->locked_vm); in xdp_umem_account_pages()
329 new_npgs = old_npgs + umem->npgs; in xdp_umem_account_pages()
331 free_uid(umem->user); in xdp_umem_account_pages()
332 umem->user = NULL; in xdp_umem_account_pages()
335 } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs, in xdp_umem_account_pages()
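Accounting charges the pinned pages against the caller's RLIMIT_MEMLOCK via a lock-free cmpxchg loop on user->locked_vm. The limit computation and the CAP_IPC_LOCK bypass below are assumptions; the loop body matches the listed lines:

	static int xdp_umem_account_pages(struct xdp_umem *umem)
	{
		unsigned long lock_limit, new_npgs, old_npgs;

		if (capable(CAP_IPC_LOCK))	/* assumed: privileged callers skip the limit */
			return 0;

		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;	/* assumed */
		umem->user = get_uid(current_user());

		do {
			old_npgs = atomic_long_read(&umem->user->locked_vm);
			new_npgs = old_npgs + umem->npgs;
			if (new_npgs > lock_limit) {
				free_uid(umem->user);
				umem->user = NULL;
				return -ENOBUFS;
			}
		} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
					     new_npgs) != old_npgs);
		return 0;
	}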
340 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) in xdp_umem_reg() argument
392 umem->address = (unsigned long)addr; in xdp_umem_reg()
393 umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK in xdp_umem_reg()
395 umem->size = size; in xdp_umem_reg()
396 umem->headroom = headroom; in xdp_umem_reg()
397 umem->chunk_size_nohr = chunk_size - headroom; in xdp_umem_reg()
398 umem->npgs = (u32)npgs; in xdp_umem_reg()
399 umem->pgs = NULL; in xdp_umem_reg()
400 umem->user = NULL; in xdp_umem_reg()
401 umem->flags = mr->flags; in xdp_umem_reg()
402 INIT_LIST_HEAD(&umem->xsk_list); in xdp_umem_reg()
403 spin_lock_init(&umem->xsk_list_lock); in xdp_umem_reg()
405 refcount_set(&umem->users, 1); in xdp_umem_reg()
407 err = xdp_umem_account_pages(umem); in xdp_umem_reg()
411 err = xdp_umem_pin_pages(umem); in xdp_umem_reg()
415 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); in xdp_umem_reg()
416 if (!umem->pages) { in xdp_umem_reg()
421 err = xdp_umem_map_pages(umem); in xdp_umem_reg()
425 kfree(umem->pages); in xdp_umem_reg()
428 xdp_umem_unpin_pages(umem); in xdp_umem_reg()
430 xdp_umem_unaccount_pages(umem); in xdp_umem_reg()
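xdp_umem_reg() fills in the umem from the user's xdp_umem_reg request and then runs account -> pin -> allocate pages[] -> map, unwinding in reverse order on failure. The sketch below covers only the tail the listing shows; all validation of mr (lengths, chunk size, headroom, alignment) and the derivation of addr, size, chunk_size, headroom, npgs and unaligned_chunks happen in the omitted lines and are merely assumed locals here:

	static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
	{
		/* ~50 lines of validation and derivation of addr, size,
		 * chunk_size, headroom, npgs and unaligned_chunks precede
		 * this point and are not part of the listing.
		 */
		umem->address = (unsigned long)addr;
		umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
						    : ~((u64)chunk_size - 1);	/* else-arm assumed */
		umem->size = size;
		umem->headroom = headroom;
		umem->chunk_size_nohr = chunk_size - headroom;
		umem->npgs = (u32)npgs;
		umem->pgs = NULL;
		umem->user = NULL;
		umem->flags = mr->flags;
		INIT_LIST_HEAD(&umem->xsk_list);
		spin_lock_init(&umem->xsk_list_lock);

		refcount_set(&umem->users, 1);

		err = xdp_umem_account_pages(umem);
		if (err)
			return err;

		err = xdp_umem_pin_pages(umem);
		if (err)
			goto out_account;

		umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
		if (!umem->pages) {
			err = -ENOMEM;
			goto out_pin;
		}

		err = xdp_umem_map_pages(umem);
		if (!err)
			return 0;

		kfree(umem->pages);

	out_pin:
		xdp_umem_unpin_pages(umem);
	out_account:
		xdp_umem_unaccount_pages(umem);
		return err;
	}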
436 struct xdp_umem *umem; in xdp_umem_create() local
439 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in xdp_umem_create()
440 if (!umem) in xdp_umem_create()
445 kfree(umem); in xdp_umem_create()
448 umem->id = err; in xdp_umem_create()
450 err = xdp_umem_reg(umem, mr); in xdp_umem_create()
452 ida_simple_remove(&umem_ida, umem->id); in xdp_umem_create()
453 kfree(umem); in xdp_umem_create()
457 return umem; in xdp_umem_create()
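xdp_umem_create() is the allocation wrapper: zero-allocate the struct, take an ID, register, and unwind on failure. The ida_simple_get() call and the ERR_PTR() returns are not visible in the listing and are assumptions:

	struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
	{
		struct xdp_umem *umem;
		int err;

		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);

		err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);	/* assumed */
		if (err < 0) {
			kfree(umem);
			return ERR_PTR(err);
		}
		umem->id = err;

		err = xdp_umem_reg(umem, mr);
		if (err) {
			ida_simple_remove(&umem_ida, umem->id);
			kfree(umem);
			return ERR_PTR(err);
		}

		return umem;
	}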
460 bool xdp_umem_validate_queues(struct xdp_umem *umem) in xdp_umem_validate_queues() argument
462 return umem->fq && umem->cq; in xdp_umem_validate_queues()
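The final helper is complete as listed: it simply reports whether both the fill queue (fq) and completion queue (cq) have been configured, which the bind path presumably checks before the umem can be used. Restored with its braces:

	bool xdp_umem_validate_queues(struct xdp_umem *umem)
	{
		return umem->fq && umem->cq;
	}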