
Searched refs:ring (Results 1 – 15 of 15) sorted by relevance

/net/rds/
ib_ring.c
66 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_init() argument
68 memset(ring, 0, sizeof(*ring)); in rds_ib_ring_init()
69 ring->w_nr = nr; in rds_ib_ring_init()
70 rdsdebug("ring %p nr %u\n", ring, ring->w_nr); in rds_ib_ring_init()
73 static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring) in __rds_ib_ring_used() argument
78 diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr); in __rds_ib_ring_used()
79 BUG_ON(diff > ring->w_nr); in __rds_ib_ring_used()
84 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_resize() argument
88 BUG_ON(__rds_ib_ring_used(ring)); in rds_ib_ring_resize()
89 ring->w_nr = nr; in rds_ib_ring_resize()
[all …]
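
The RDS IB work ring above tracks occupancy with two free-running counters rather than head/tail pointers: entries in use = allocations minus frees, relying on unsigned wraparound (see the w_alloc_ctr / w_free_ctr difference and the BUG_ON against w_nr). A minimal user-space sketch of that counter scheme; the names and capacity are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Free-running counters; their difference gives occupancy even after wrap. */
struct work_ring {
    uint32_t nr;        /* capacity */
    uint32_t alloc_ctr; /* total entries ever allocated */
    uint32_t free_ctr;  /* total entries ever freed */
};

static uint32_t ring_used(const struct work_ring *r)
{
    uint32_t diff = r->alloc_ctr - r->free_ctr; /* well-defined modulo 2^32 */
    assert(diff <= r->nr);                      /* mirrors the BUG_ON above */
    return diff;
}

int main(void)
{
    struct work_ring r = { .nr = 256 };

    r.alloc_ctr += 10;  /* post 10 work requests */
    r.free_ctr  += 4;   /* 4 completions arrive */
    printf("used=%u free=%u\n", ring_used(&r), r.nr - ring_used(&r));
    return 0;
}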
ib.h
418 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
419 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
420 u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
421 void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
422 void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
423 int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
424 int rds_ib_ring_low(struct rds_ib_work_ring *ring);
425 u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
426 u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
/net/9p/
trans_xen.c
127 static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size) in p9_xen_write_todo() argument
131 cons = ring->intf->out_cons; in p9_xen_write_todo()
132 prod = ring->intf->out_prod; in p9_xen_write_todo()
145 struct xen_9pfs_dataring *ring; in p9_xen_request() local
158 ring = &priv->rings[num]; in p9_xen_request()
161 while (wait_event_killable(ring->wq, in p9_xen_request()
162 p9_xen_write_todo(ring, size)) != 0) in p9_xen_request()
165 spin_lock_irqsave(&ring->lock, flags); in p9_xen_request()
166 cons = ring->intf->out_cons; in p9_xen_request()
167 prod = ring->intf->out_prod; in p9_xen_request()
[all …]
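
p9_xen_write_todo() above decides whether a request of size bytes fits by reading the shared out_prod/out_cons indices under the ring lock. A hedged sketch of the usual byte-ring space check with free-running indices; RING_SIZE and the function names here are illustrative, not the 9p transport's:

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 4096u /* illustrative; a power of two */

/* Bytes currently queued between consumer and producer indices. */
static uint32_t ring_queued(uint32_t prod, uint32_t cons)
{
    return prod - cons; /* free-running indices, wraps safely */
}

/* True if a write of 'size' bytes fits in the remaining space. */
static bool write_todo(uint32_t prod, uint32_t cons, uint32_t size)
{
    return RING_SIZE - ring_queued(prod, cons) >= size;
}

int main(void)
{
    uint32_t prod = 5000, cons = 2000; /* indices may already have wrapped */

    return write_todo(prod, cons, 512) ? 0 : 1;
}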
/net/xdp/
xsk_queue.h
43 struct xdp_ring *ring; member
102 q->prod_tail = READ_ONCE(q->ring->producer); in xskq_nb_avail()
117 q->cons_tail = READ_ONCE(q->ring->consumer); in xskq_nb_free()
129 q->prod_tail = READ_ONCE(q->ring->producer); in xskq_has_addrs()
178 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_validate_addr() local
181 *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask; in xskq_validate_addr()
206 WRITE_ONCE(q->ring->consumer, q->cons_tail); in xskq_peek_addr()
223 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_produce_addr() local
229 ring->desc[q->prod_tail++ & q->ring_mask] = addr; in xskq_produce_addr()
234 WRITE_ONCE(q->ring->producer, q->prod_tail); in xskq_produce_addr()
[all …]
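
The AF_XDP queue above reads the peer's index with READ_ONCE, publishes its own with WRITE_ONCE, and addresses slots with index & ring_mask. A simplified single-producer/single-consumer analogue in user space; C11 atomics stand in for the kernel's READ_ONCE/WRITE_ONCE, there are no cached tail copies, and all names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_ENTRIES 64u /* power of two so 'index & mask' wraps */

struct spsc_ring {
    _Atomic uint32_t producer;
    _Atomic uint32_t consumer;
    uint64_t desc[RING_ENTRIES];
};

static bool ring_produce(struct spsc_ring *q, uint64_t addr)
{
    uint32_t prod = atomic_load_explicit(&q->producer, memory_order_relaxed);
    uint32_t cons = atomic_load_explicit(&q->consumer, memory_order_acquire);

    if (prod - cons == RING_ENTRIES)
        return false; /* full */
    q->desc[prod & (RING_ENTRIES - 1)] = addr;
    /* publish the new producer index only after the descriptor write */
    atomic_store_explicit(&q->producer, prod + 1, memory_order_release);
    return true;
}

static bool ring_consume(struct spsc_ring *q, uint64_t *addr)
{
    uint32_t cons = atomic_load_explicit(&q->consumer, memory_order_relaxed);
    uint32_t prod = atomic_load_explicit(&q->producer, memory_order_acquire);

    if (cons == prod)
        return false; /* empty */
    *addr = q->desc[cons & (RING_ENTRIES - 1)];
    atomic_store_explicit(&q->consumer, cons + 1, memory_order_release);
    return true;
}

int main(void)
{
    static struct spsc_ring q;
    uint64_t out;

    ring_produce(&q, 0x1000);
    return ring_consume(&q, &out) && out == 0x1000 ? 0 : 1;
}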
xsk.c
63 umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_set_rx_need_wakeup()
77 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_set_tx_need_wakeup()
90 umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP; in xsk_clear_rx_need_wakeup()
104 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; in xsk_clear_tx_need_wakeup()
770 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_setsockopt()
838 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring) in xsk_enter_rxtx_offsets() argument
840 ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); in xsk_enter_rxtx_offsets()
841 ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); in xsk_enter_rxtx_offsets()
842 ring->desc = offsetof(struct xdp_rxtx_ring, desc); in xsk_enter_rxtx_offsets()
845 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) in xsk_enter_umem_offsets() argument
[all …]
xsk_queue.c
49 q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags, in xskq_create()
51 if (!q->ring) { in xskq_create()
64 page_frag_free(q->ring); in xskq_destroy()
/net/dccp/ccids/lib/
loss_interval.c
24 return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL; in tfrc_lh_peek()
31 return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length; in tfrc_lh_get_interval()
39 if (lh->ring[LIH_INDEX(lh->counter)] == NULL) in tfrc_lh_demand_next()
40 lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab, in tfrc_lh_demand_next()
42 return lh->ring[LIH_INDEX(lh->counter)]; in tfrc_lh_demand_next()
51 if (lh->ring[LIH_INDEX(lh->counter)] != NULL) { in tfrc_lh_cleanup()
53 lh->ring[LIH_INDEX(lh->counter)]); in tfrc_lh_cleanup()
54 lh->ring[LIH_INDEX(lh->counter)] = NULL; in tfrc_lh_cleanup()
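
The TFRC loss-interval history above keeps a small fixed array indexed through LIH_INDEX() (presumably the running counter reduced modulo the ring size) and allocates entries lazily from a slab cache. A compact user-space sketch of that access pattern; names are illustrative and calloc stands in for the kernel's kmem_cache_alloc:

#include <stdint.h>
#include <stdlib.h>

#define LIH_SIZE 8u /* illustrative ring size */

struct loss_interval { uint32_t li_length; };

struct loss_hist {
    struct loss_interval *ring[LIH_SIZE];
    uint32_t counter; /* total intervals ever opened */
};

/* Most recently opened interval, or NULL if none yet. */
static struct loss_interval *lh_peek(struct loss_hist *lh)
{
    return lh->counter ? lh->ring[(lh->counter - 1) % LIH_SIZE] : NULL;
}

/* Slot for the next interval, allocated lazily on first use. */
static struct loss_interval *lh_demand_next(struct loss_hist *lh)
{
    uint32_t idx = lh->counter % LIH_SIZE;

    if (!lh->ring[idx])
        lh->ring[idx] = calloc(1, sizeof(*lh->ring[idx]));
    return lh->ring[idx];
}

int main(void)
{
    struct loss_hist lh = { 0 };
    struct loss_interval *li = lh_demand_next(&lh);

    if (!li)
        return 1;
    li->li_length = 3;
    lh.counter++; /* the interval is now "open" */
    return lh_peek(&lh) == li ? 0 : 1;
}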
packet_history.c
140 swap(h->ring[idx_a], h->ring[idx_b]); in tfrc_rx_hist_swap()
339 h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC); in tfrc_rx_hist_alloc()
340 if (h->ring[i] == NULL) in tfrc_rx_hist_alloc()
349 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_alloc()
350 h->ring[i] = NULL; in tfrc_rx_hist_alloc()
360 if (h->ring[i] != NULL) { in tfrc_rx_hist_purge()
361 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_purge()
362 h->ring[i] = NULL; in tfrc_rx_hist_purge()
372 return h->ring[0]; in tfrc_rx_hist_rtt_last_s()
381 return h->ring[h->rtt_sample_prev]; in tfrc_rx_hist_rtt_prev_s()
packet_history.h
82 struct tfrc_rx_hist_entry *ring[TFRC_NDUPACK + 1]; member
102 return h->ring[tfrc_rx_hist_index(h, h->loss_count)]; in tfrc_rx_hist_last_rcv()
111 return h->ring[tfrc_rx_hist_index(h, n)]; in tfrc_rx_hist_entry()
120 return h->ring[h->loss_start]; in tfrc_rx_hist_loss_prev()
loss_interval.h
42 struct tfrc_loss_interval *ring[LIH_SIZE]; member
/net/packet/
diag.c
72 static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type, in pdiag_put_ring() argument
77 if (!ring->pg_vec) in pdiag_put_ring()
80 pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT; in pdiag_put_ring()
81 pdr.pdr_block_nr = ring->pg_vec_len; in pdiag_put_ring()
82 pdr.pdr_frame_size = ring->frame_size; in pdiag_put_ring()
83 pdr.pdr_frame_nr = ring->frame_max + 1; in pdiag_put_ring()
86 pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov; in pdiag_put_ring()
87 pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv; in pdiag_put_ring()
88 pdr.pdr_features = ring->prb_bdqc.feature_req_word; in pdiag_put_ring()
/net/core/
page_pool.c
50 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) in page_pool_init()
87 struct ptr_ring *r = &pool->ring; in __page_pool_get_cached()
261 ret = ptr_ring_produce(&pool->ring, page); in __page_pool_recycle_into_ring()
263 ret = ptr_ring_produce_bh(&pool->ring, page); in __page_pool_recycle_into_ring()
329 while ((page = ptr_ring_consume_bh(&pool->ring))) { in __page_pool_empty_ring()
344 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_free()
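
page_pool above parks recycled pages in a bounded ptr_ring; when the produce fails, the page presumably has to be released back to the normal allocator (that fallback is an assumption here, not shown in the snippet). A user-space sketch of that "bounded recycle cache" idea, with a plain mutex-protected array standing in for ptr_ring and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define POOL_RING 32u

struct recycle_pool {
    pthread_mutex_t lock;
    void *slots[POOL_RING];
    unsigned int count;
};

/* Try to cache an object for reuse; caller frees it if the cache is full. */
static bool pool_recycle(struct recycle_pool *p, void *obj)
{
    bool ok = false;

    pthread_mutex_lock(&p->lock);
    if (p->count < POOL_RING) {
        p->slots[p->count++] = obj;
        ok = true;
    }
    pthread_mutex_unlock(&p->lock);
    return ok;
}

/* Take a cached object if one is available, otherwise allocate fresh. */
static void *pool_get(struct recycle_pool *p, size_t size)
{
    void *obj = NULL;

    pthread_mutex_lock(&p->lock);
    if (p->count)
        obj = p->slots[--p->count];
    pthread_mutex_unlock(&p->lock);
    return obj ? obj : malloc(size);
}

int main(void)
{
    static struct recycle_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
    void *buf = pool_get(&pool, 2048);  /* cache empty: falls back to malloc */

    if (!pool_recycle(&pool, buf))      /* cache has room: stash it */
        free(buf);
    free(pool_get(&pool, 2048));        /* reuses the cached buffer */
    return 0;
}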
/net/hsr/
Kconfig
12 and it must be connected as a node in a ring network together with
16 directions on the ring (over both slave ports), giving a redundant,
17 instant fail-over network. Each HSR node in the ring acts like a
/net/ipv6/
seg6_hmac.c
168 char *ring, *off; in seg6_hmac_compute() local
192 ring = this_cpu_ptr(hmac_ring); in seg6_hmac_compute()
193 off = ring; in seg6_hmac_compute()
215 dgsize = __do_hmac(hinfo, ring, plen, tmp_out, in seg6_hmac_compute()
/net/sched/
sch_generic.c
738 if (!q->ring.queue) in pfifo_fast_reset()
805 if (!q->ring.queue) in pfifo_fast_destroy()
810 ptr_ring_cleanup(&q->ring, NULL); in pfifo_fast_destroy()