Searched refs: umem  (Results 1 – 6 of 6, sorted by relevance)

/net/xdp/

xdp_umem.c
     26  void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)   [in xdp_add_sk_umem(), argument]
     33          spin_lock_irqsave(&umem->xsk_list_lock, flags);   [in xdp_add_sk_umem()]
     34          list_add_rcu(&xs->list, &umem->xsk_list);   [in xdp_add_sk_umem()]
     35          spin_unlock_irqrestore(&umem->xsk_list_lock, flags);   [in xdp_add_sk_umem()]
     38  void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)   [in xdp_del_sk_umem(), argument]
     45          spin_lock_irqsave(&umem->xsk_list_lock, flags);   [in xdp_del_sk_umem()]
     47          spin_unlock_irqrestore(&umem->xsk_list_lock, flags);   [in xdp_del_sk_umem()]
     54  static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,   [in xdp_reg_umem_at_qid(), argument]
     63          dev->_rx[queue_id].umem = umem;   [in xdp_reg_umem_at_qid()]
     65          dev->_tx[queue_id].umem = umem;   [in xdp_reg_umem_at_qid()]
    [all …]
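
The two functions matched here, xdp_add_sk_umem() and xdp_del_sk_umem(), attach and detach an AF_XDP socket on the umem's socket list: the writer takes xsk_list_lock with spin_lock_irqsave() and links or unlinks the socket with the RCU list helpers, so readers can walk the list without taking the lock. Below is a minimal userspace sketch of that writer-side pattern only; all names are invented for the sketch, and a plain mutex plus a singly linked list stand in for the kernel's spinlock and RCU list.

/*
 * Userspace model, not kernel code: a lock serializes adding/removing a
 * socket on the umem's socket list. In the kernel the lock is
 * umem->xsk_list_lock (spin_lock_irqsave) and the list operations are the
 * RCU variants; identifiers here are invented. Build with: cc -pthread
 */
#include <pthread.h>
#include <stdio.h>

struct model_sock {
    int id;
    struct model_sock *next;
};

struct model_umem {
    struct model_sock *xsk_list;       /* head of attached sockets */
    pthread_mutex_t xsk_list_lock;     /* stands in for the spinlock */
};

static void model_add_sk_umem(struct model_umem *umem, struct model_sock *xs)
{
    pthread_mutex_lock(&umem->xsk_list_lock);
    xs->next = umem->xsk_list;         /* kernel: list_add_rcu() */
    umem->xsk_list = xs;
    pthread_mutex_unlock(&umem->xsk_list_lock);
}

static void model_del_sk_umem(struct model_umem *umem, struct model_sock *xs)
{
    struct model_sock **pp;

    pthread_mutex_lock(&umem->xsk_list_lock);
    for (pp = &umem->xsk_list; *pp; pp = &(*pp)->next) {
        if (*pp == xs) {               /* kernel: list_del_rcu() */
            *pp = xs->next;
            break;
        }
    }
    pthread_mutex_unlock(&umem->xsk_list_lock);
}

int main(void)
{
    static struct model_umem umem = {
        .xsk_list = NULL,
        .xsk_list_lock = PTHREAD_MUTEX_INITIALIZER,
    };
    struct model_sock a = { .id = 1 }, b = { .id = 2 };

    model_add_sk_umem(&umem, &a);
    model_add_sk_umem(&umem, &b);
    model_del_sk_umem(&umem, &a);

    for (struct model_sock *s = umem.xsk_list; s; s = s->next)
        printf("attached socket %d\n", s->id);   /* prints: 2 */
    return 0;
}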

xsk.c
     36          return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&   [in xsk_is_setup_for_bpf_map()]
     37                 READ_ONCE(xs->umem->fq);   [in xsk_is_setup_for_bpf_map()]
     40  bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)   [in xsk_umem_has_addrs(), argument]
     42          return xskq_has_addrs(umem->fq, cnt);   [in xsk_umem_has_addrs()]
     46  u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)   [in xsk_umem_peek_addr(), argument]
     48          return xskq_peek_addr(umem->fq, addr, umem);   [in xsk_umem_peek_addr()]
     52  void xsk_umem_discard_addr(struct xdp_umem *umem)   [in xsk_umem_discard_addr(), argument]
     54          xskq_discard_addr(umem->fq);   [in xsk_umem_discard_addr()]
     58  void xsk_set_rx_need_wakeup(struct xdp_umem *umem)   [in xsk_set_rx_need_wakeup(), argument]
     60          if (umem->need_wakeup & XDP_WAKEUP_RX)   [in xsk_set_rx_need_wakeup()]
    [all …]
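
The xsk.c hits are the helpers a driver uses to pull buffer addresses out of the umem: xsk_umem_has_addrs(), xsk_umem_peek_addr() and xsk_umem_discard_addr() all forward to the umem's fill queue (umem->fq), peeking the next address and consuming the ring entry only once the caller is done with it. Below is a minimal userspace model of that peek-then-discard contract, not the kernel ring implementation; struct and function names are invented for the sketch.

/*
 * Userspace model of the peek/discard contract shown above: the consumer
 * peeks the next buffer address from the fill queue and advances the ring
 * only after committing to that address. Names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u                   /* power of two, so masking works */

struct model_fq {
    uint64_t addrs[RING_SIZE];
    uint32_t prod;                     /* producer index (userspace fills) */
    uint32_t cons;                     /* consumer index (driver drains) */
};

static bool model_has_addrs(const struct model_fq *fq, uint32_t cnt)
{
    return fq->prod - fq->cons >= cnt;
}

/* Peek the next address without consuming it; NULL when the ring is empty. */
static const uint64_t *model_peek_addr(const struct model_fq *fq)
{
    if (fq->prod == fq->cons)
        return NULL;
    return &fq->addrs[fq->cons & (RING_SIZE - 1)];
}

/* Consume the entry returned by the last successful peek. */
static void model_discard_addr(struct model_fq *fq)
{
    fq->cons++;
}

int main(void)
{
    struct model_fq fq = { .prod = 0, .cons = 0 };

    /* Userspace side: post two buffer addresses into the fill ring. */
    fq.addrs[fq.prod++ & (RING_SIZE - 1)] = 0x1000;
    fq.addrs[fq.prod++ & (RING_SIZE - 1)] = 0x3000;

    /* Driver side: peek, use the buffer, then discard the ring entry. */
    while (model_has_addrs(&fq, 1)) {
        const uint64_t *addr = model_peek_addr(&fq);

        printf("rx into umem offset 0x%llx\n", (unsigned long long)*addr);
        model_discard_addr(&fq);
    }
    return 0;
}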

xsk_diag.c
     49          struct xdp_umem *umem = xs->umem;   [in xsk_diag_put_umem(), local]
     53          if (!umem)   [in xsk_diag_put_umem()]
     56          du.id = umem->id;   [in xsk_diag_put_umem()]
     57          du.size = umem->size;   [in xsk_diag_put_umem()]
     58          du.num_pages = umem->npgs;   [in xsk_diag_put_umem()]
     59          du.chunk_size = umem->chunk_size_nohr + umem->headroom;   [in xsk_diag_put_umem()]
     60          du.headroom = umem->headroom;   [in xsk_diag_put_umem()]
     61          du.ifindex = umem->dev ? umem->dev->ifindex : 0;   [in xsk_diag_put_umem()]
     62          du.queue_id = umem->queue_id;   [in xsk_diag_put_umem()]
     64          if (umem->zc)   [in xsk_diag_put_umem()]
    [all …]

xsk_queue.h
    137  static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr,   [in xskq_crosses_non_contig_pg(), argument]
    142          (unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr &   [in xskq_crosses_non_contig_pg()]
    160          struct xdp_umem *umem)   [in xskq_is_valid_addr_unaligned(), argument]
    166          xskq_crosses_non_contig_pg(umem, addr, length)) {   [in xskq_is_valid_addr_unaligned()]
    175          struct xdp_umem *umem)   [in xskq_validate_addr(), argument]
    183          if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {   [in xskq_validate_addr()]
    185          umem->chunk_size_nohr,   [in xskq_validate_addr()]
    186          umem))   [in xskq_validate_addr()]
    202          struct xdp_umem *umem)   [in xskq_peek_addr(), argument]
    213          return xskq_validate_addr(q, addr, umem);   [in xskq_peek_addr()]
    [all …]
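
The xsk_queue.h hits are the address-validation path for the fill queue. When the umem was registered with XDP_UMEM_UNALIGNED_CHUNK_FLAG, xskq_validate_addr() additionally checks, via xskq_crosses_non_contig_pg(), that the buffer does not cross a page boundary into a page that is not physically contiguous with the one before it, using a flag encoded in umem->pages[].addr. Below is a standalone model of that check, with an explicit contiguity table standing in for the kernel's per-page flag; names are invented for the sketch.

/*
 * Userspace model of the non-contiguous-page-crossing check, not the
 * kernel code: a buffer of `length` bytes at `addr` is rejected when it
 * straddles a page boundary into a page that is not physically contiguous
 * with the previous one. Names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE  (1ULL << MODEL_PAGE_SHIFT)

struct model_umem {
    /* next_pg_contig[i] is true when page i+1 directly follows page i. */
    bool next_pg_contig[4];
};

static bool model_crosses_non_contig_pg(const struct model_umem *umem,
                                        uint64_t addr, uint64_t length)
{
    bool cross_pg = (addr & (MODEL_PAGE_SIZE - 1)) + length > MODEL_PAGE_SIZE;

    /* Crossing is only a problem when the next page is not contiguous. */
    return cross_pg && !umem->next_pg_contig[addr >> MODEL_PAGE_SHIFT];
}

int main(void)
{
    struct model_umem umem = {
        .next_pg_contig = { true, false, true, false },
    };

    /* Fits inside page 0: never rejected. */
    printf("%d\n", model_crosses_non_contig_pg(&umem, 0x0100, 256));   /* 0 */
    /* Crosses page 0 -> 1, which are contiguous: accepted. */
    printf("%d\n", model_crosses_non_contig_pg(&umem, 0x0f80, 256));   /* 0 */
    /* Crosses page 1 -> 2, which are not contiguous: rejected. */
    printf("%d\n", model_crosses_non_contig_pg(&umem, 0x1f80, 256));   /* 1 */
    return 0;
}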

xdp_umem.h
     11  int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
     13  void xdp_umem_clear_dev(struct xdp_umem *umem);
     14  bool xdp_umem_validate_queues(struct xdp_umem *umem);
     15  void xdp_get_umem(struct xdp_umem *umem);
     16  void xdp_put_umem(struct xdp_umem *umem);
     17  void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
     18  void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);

xsk_queue.c
     87  struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,   [in xsk_reuseq_swap(), argument]
     90          struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;   [in xsk_reuseq_swap()]
     93          umem->fq_reuse = newq;   [in xsk_reuseq_swap()]
    104          umem->fq_reuse = newq;   [in xsk_reuseq_swap()]
    115  void xsk_reuseq_destroy(struct xdp_umem *umem)   [in xsk_reuseq_destroy(), argument]
    117          xsk_reuseq_free(umem->fq_reuse);   [in xsk_reuseq_destroy()]
    118          umem->fq_reuse = NULL;   [in xsk_reuseq_destroy()]
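
xsk_reuseq_swap() installs a caller-allocated fill-queue reuse buffer and returns the previously installed one so the caller can free it, while xsk_reuseq_destroy() frees the installed queue and clears umem->fq_reuse. The sketch below models just that install-and-return-old behaviour in userspace; the in-kernel swap has additional paths (two separate "umem->fq_reuse = newq" assignments are visible at lines 93 and 104) for preserving entries from the old queue, which are not modelled, and all names are invented for the sketch.

/*
 * Userspace sketch of the install-and-return-old pattern, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct model_fq_reuse {
    unsigned int nentries;
    /* the real structure also carries an array of stashed addresses */
};

struct model_umem {
    struct model_fq_reuse *fq_reuse;
};

/* Install newq; return the previously installed queue (possibly NULL). */
static struct model_fq_reuse *model_reuseq_swap(struct model_umem *umem,
                                                struct model_fq_reuse *newq)
{
    struct model_fq_reuse *oldq = umem->fq_reuse;

    umem->fq_reuse = newq;
    return oldq;
}

static void model_reuseq_destroy(struct model_umem *umem)
{
    free(umem->fq_reuse);
    umem->fq_reuse = NULL;
}

int main(void)
{
    struct model_umem umem = { .fq_reuse = NULL };
    struct model_fq_reuse *newq = calloc(1, sizeof(*newq));
    struct model_fq_reuse *oldq;

    if (!newq)
        return 1;
    newq->nentries = 64;

    oldq = model_reuseq_swap(&umem, newq);   /* nothing installed yet */
    free(oldq);                              /* free(NULL) is a no-op */

    printf("installed reuse queue with %u entries\n", umem.fq_reuse->nentries);
    model_reuseq_destroy(&umem);
    return 0;
}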