
Searched refs:ring (Results 1 – 15 of 15) sorted by relevance

/net/rds/
ib_ring.c 66 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_init() argument
68 memset(ring, 0, sizeof(*ring)); in rds_ib_ring_init()
69 ring->w_nr = nr; in rds_ib_ring_init()
70 rdsdebug("ring %p nr %u\n", ring, ring->w_nr); in rds_ib_ring_init()
73 static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring) in __rds_ib_ring_used() argument
78 diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr); in __rds_ib_ring_used()
79 BUG_ON(diff > ring->w_nr); in __rds_ib_ring_used()
84 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_resize() argument
88 BUG_ON(__rds_ib_ring_used(ring)); in rds_ib_ring_resize()
89 ring->w_nr = nr; in rds_ib_ring_resize()
[all …]
ib.h 415 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
416 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
417 u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
418 void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
419 void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
420 int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
421 int rds_ib_ring_low(struct rds_ib_work_ring *ring);
422 u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
423 u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
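The rds_ib_work_ring hits above track occupancy with two free-running counters rather than head/tail pointers: the posting side bumps w_alloc_ctr, completions bump w_free_ctr, and their unsigned difference is the number of in-flight entries. A minimal user-space sketch of that accounting (names and the stdatomic wrapper are illustrative, not the kernel's types):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct work_ring {
        uint32_t w_nr;            /* total entries in the ring       */
        uint32_t w_alloc_ctr;     /* advanced when work is posted    */
        atomic_uint w_free_ctr;   /* advanced when work completes    */
};

static void ring_init(struct work_ring *ring, uint32_t nr)
{
        memset(ring, 0, sizeof(*ring));
        ring->w_nr = nr;
}

/* Unsigned subtraction stays correct even after the counters wrap. */
static uint32_t ring_used(struct work_ring *ring)
{
        uint32_t diff = ring->w_alloc_ctr -
                        (uint32_t)atomic_load(&ring->w_free_ctr);

        assert(diff <= ring->w_nr);
        return diff;
}

int main(void)
{
        struct work_ring ring;

        ring_init(&ring, 256);
        ring.w_alloc_ctr += 3;                  /* post three work requests */
        atomic_fetch_add(&ring.w_free_ctr, 1);  /* one completion arrives   */
        printf("used entries: %u\n", (unsigned)ring_used(&ring)); /* 2 */
        return 0;
}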
/net/9p/
trans_xen.c 47 #define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order) argument
127 static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size) in p9_xen_write_todo() argument
131 cons = ring->intf->out_cons; in p9_xen_write_todo()
132 prod = ring->intf->out_prod; in p9_xen_write_todo()
135 return XEN_9PFS_RING_SIZE(ring) - in p9_xen_write_todo()
136 xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size; in p9_xen_write_todo()
145 struct xen_9pfs_dataring *ring; in p9_xen_request() local
158 ring = &priv->rings[num]; in p9_xen_request()
161 while (wait_event_killable(ring->wq, in p9_xen_request()
162 p9_xen_write_todo(ring, size)) != 0) in p9_xen_request()
[all …]
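p9_xen_write_todo() above decides whether size more bytes still fit in the out ring: out_prod and out_cons are free-running indices, so their difference is the queued byte count and the remainder of the ring is free space. A stand-alone sketch of the same test, assuming a fixed ring size and hypothetical helper names (the real code uses the Xen flex-ring macros):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE (1u << 12)    /* assume a 4 KiB out ring for the demo */

/* Bytes currently queued: wrap-safe with unsigned arithmetic. */
static uint32_t bytes_queued(uint32_t prod, uint32_t cons)
{
        return prod - cons;
}

/* True when the remaining free space can hold a request of 'size' bytes. */
static bool write_todo(uint32_t prod, uint32_t cons, uint32_t size)
{
        return RING_SIZE - bytes_queued(prod, cons) >= size;
}

int main(void)
{
        uint32_t cons = 100;
        uint32_t prod = 100 + RING_SIZE - 64;   /* 64 bytes of free space */

        printf("room for 32 bytes:  %d\n", write_todo(prod, cons, 32));  /* 1 */
        printf("room for 128 bytes: %d\n", write_todo(prod, cons, 128)); /* 0 */
        return 0;
}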
/net/xdp/
xsk_queue.h 43 struct xdp_ring *ring; member
114 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_cons_read_addr_unchecked() local
119 *addr = ring->desc[idx]; in xskq_cons_read_addr_unchecked()
190 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc() local
193 *desc = ring->desc[idx]; in xskq_cons_read_desc()
208 WRITE_ONCE(q->ring->consumer, q->cached_cons); in __xskq_cons_release()
214 q->cached_prod = READ_ONCE(q->ring->producer); in __xskq_cons_peek()
266 return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) == in xskq_cons_is_full()
273 return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); in xskq_cons_present_entries()
286 q->cached_cons = READ_ONCE(q->ring->consumer); in xskq_prod_is_full()
[all …]
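The xsk_queue.h hits show the AF_XDP single-producer/single-consumer pattern: each side works against cached copies of the indices (cached_prod, cached_cons) and only reads or publishes the shared producer/consumer words when the cache runs dry or consumed entries are released. A simplified single-threaded sketch, with C11 atomics standing in for READ_ONCE/WRITE_ONCE and illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8          /* must be a power of two */

struct ring {
        atomic_uint producer;   /* shared with the producing side */
        atomic_uint consumer;   /* shared with the producing side */
        uint64_t desc[RING_ENTRIES];
};

struct cons_queue {
        struct ring *ring;
        uint32_t cached_prod;
        uint32_t cached_cons;
};

/* Refresh the cached producer index only when the cache is empty,
 * then report how many entries are readable. */
static uint32_t cons_peek(struct cons_queue *q)
{
        if (q->cached_prod == q->cached_cons)
                q->cached_prod = atomic_load_explicit(&q->ring->producer,
                                                      memory_order_acquire);
        return q->cached_prod - q->cached_cons;
}

/* Read one descriptor without publishing the consumer index yet. */
static uint64_t cons_read(struct cons_queue *q)
{
        return q->ring->desc[q->cached_cons++ & (RING_ENTRIES - 1)];
}

/* Publish everything consumed so far, like __xskq_cons_release() above. */
static void cons_release(struct cons_queue *q)
{
        atomic_store_explicit(&q->ring->consumer, q->cached_cons,
                              memory_order_release);
}

int main(void)
{
        static struct ring ring;
        struct cons_queue q = { .ring = &ring };

        ring.desc[0] = 0xabcd;              /* producer fills slot 0 ...  */
        atomic_store(&ring.producer, 1);    /* ... and publishes it       */

        if (cons_peek(&q))
                printf("got desc 0x%llx\n",
                       (unsigned long long)cons_read(&q));
        cons_release(&q);
        return 0;
}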
xsk_queue.c 40 q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags, in xskq_create()
42 if (!q->ring) { in xskq_create()
55 page_frag_free(q->ring); in xskq_destroy()
xsk.c 41 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_set_rx_need_wakeup()
55 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_set_tx_need_wakeup()
68 pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP; in xsk_clear_rx_need_wakeup()
82 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; in xsk_clear_tx_need_wakeup()
856 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_setsockopt()
920 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring) in xsk_enter_rxtx_offsets() argument
922 ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); in xsk_enter_rxtx_offsets()
923 ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); in xsk_enter_rxtx_offsets()
924 ring->desc = offsetof(struct xdp_rxtx_ring, desc); in xsk_enter_rxtx_offsets()
927 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) in xsk_enter_umem_offsets() argument
[all …]
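xsk_enter_rxtx_offsets() and xsk_enter_umem_offsets() above report where the producer index, consumer index and descriptor array sit inside the mmapped ring, computed with offsetof(). A stand-alone illustration of that offset-reporting technique with made-up layout types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ring_ptrs {
        uint32_t producer;
        uint32_t consumer;
};

struct rxtx_ring {
        struct ring_ptrs ptrs;
        uint64_t desc[64];      /* fixed size here just for the demo */
};

struct ring_offsets {
        uint64_t producer;
        uint64_t consumer;
        uint64_t desc;
};

/* Fill in the byte offsets a user of the mapping would need. */
static void fill_offsets(struct ring_offsets *off)
{
        off->producer = offsetof(struct rxtx_ring, ptrs.producer);
        off->consumer = offsetof(struct rxtx_ring, ptrs.consumer);
        off->desc     = offsetof(struct rxtx_ring, desc);
}

int main(void)
{
        struct ring_offsets off;

        fill_offsets(&off);
        printf("producer @ %llu, consumer @ %llu, desc @ %llu\n",
               (unsigned long long)off.producer,
               (unsigned long long)off.consumer,
               (unsigned long long)off.desc);
        return 0;
}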
/net/dccp/ccids/lib/
loss_interval.c 24 return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL; in tfrc_lh_peek()
31 return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length; in tfrc_lh_get_interval()
39 if (lh->ring[LIH_INDEX(lh->counter)] == NULL) in tfrc_lh_demand_next()
40 lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab, in tfrc_lh_demand_next()
42 return lh->ring[LIH_INDEX(lh->counter)]; in tfrc_lh_demand_next()
51 if (lh->ring[LIH_INDEX(lh->counter)] != NULL) { in tfrc_lh_cleanup()
53 lh->ring[LIH_INDEX(lh->counter)]); in tfrc_lh_cleanup()
54 lh->ring[LIH_INDEX(lh->counter)] = NULL; in tfrc_lh_cleanup()
packet_history.c 140 swap(h->ring[idx_a], h->ring[idx_b]); in tfrc_rx_hist_swap()
339 h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC); in tfrc_rx_hist_alloc()
340 if (h->ring[i] == NULL) in tfrc_rx_hist_alloc()
349 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_alloc()
350 h->ring[i] = NULL; in tfrc_rx_hist_alloc()
360 if (h->ring[i] != NULL) { in tfrc_rx_hist_purge()
361 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_purge()
362 h->ring[i] = NULL; in tfrc_rx_hist_purge()
373 return h->ring[0]; in tfrc_rx_hist_rtt_last_s()
383 return h->ring[h->rtt_sample_prev]; in tfrc_rx_hist_rtt_prev_s()
packet_history.h 82 struct tfrc_rx_hist_entry *ring[TFRC_NDUPACK + 1]; member
102 return h->ring[tfrc_rx_hist_index(h, h->loss_count)]; in tfrc_rx_hist_last_rcv()
111 return h->ring[tfrc_rx_hist_index(h, n)]; in tfrc_rx_hist_entry()
120 return h->ring[h->loss_start]; in tfrc_rx_hist_loss_prev()
loss_interval.h 42 struct tfrc_loss_interval *ring[LIH_SIZE]; member
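The TFRC loss-interval and RX-history hits use small fixed arrays as circular histories: a running counter is reduced modulo the array size, slots are allocated on demand and freed in a cleanup pass. A user-space sketch of the loss-interval variant, with calloc() standing in for the kernel's kmem_cache and illustrative names and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LIH_SIZE 8
#define LIH_INDEX(ctr) ((ctr) % LIH_SIZE)

struct loss_interval {
        uint32_t li_seqno;
        uint32_t li_length;
};

struct loss_hist {
        struct loss_interval *ring[LIH_SIZE];
        uint32_t counter;
};

/* Return the newest interval, or NULL while the history is still empty. */
static struct loss_interval *lh_peek(struct loss_hist *lh)
{
        return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;
}

/* Make sure the slot for the next interval exists, allocating on demand. */
static struct loss_interval *lh_demand_next(struct loss_hist *lh)
{
        uint32_t idx = LIH_INDEX(lh->counter);

        if (lh->ring[idx] == NULL)
                lh->ring[idx] = calloc(1, sizeof(*lh->ring[idx]));
        return lh->ring[idx];
}

/* Free every allocated slot, mirroring tfrc_lh_cleanup() above. */
static void lh_cleanup(struct loss_hist *lh)
{
        for (int i = 0; i < LIH_SIZE; i++) {
                free(lh->ring[i]);
                lh->ring[i] = NULL;
        }
}

int main(void)
{
        struct loss_hist lh = { 0 };
        struct loss_interval *li = lh_demand_next(&lh);

        if (li) {
                li->li_length = 42;
                lh.counter++;
        }
        li = lh_peek(&lh);
        if (li)
                printf("newest interval length: %u\n", (unsigned)li->li_length);
        lh_cleanup(&lh);
        return 0;
}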
/net/packet/
diag.c 72 static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type, in pdiag_put_ring() argument
77 if (!ring->pg_vec) in pdiag_put_ring()
80 pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT; in pdiag_put_ring()
81 pdr.pdr_block_nr = ring->pg_vec_len; in pdiag_put_ring()
82 pdr.pdr_frame_size = ring->frame_size; in pdiag_put_ring()
83 pdr.pdr_frame_nr = ring->frame_max + 1; in pdiag_put_ring()
86 pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov; in pdiag_put_ring()
87 pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv; in pdiag_put_ring()
88 pdr.pdr_features = ring->prb_bdqc.feature_req_word; in pdiag_put_ring()
/net/core/
page_pool.c 67 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) in page_pool_init()
106 struct ptr_ring *r = &pool->ring; in page_pool_refill_alloc_cache()
330 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
332 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
418 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
433 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_free()
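page_pool keeps recycled pages in a ptr_ring: pages are pushed with ptr_ring_produce() on recycle and pulled back out to refill the allocation cache. A tiny user-space stand-in for that pointer FIFO, using the same empty-slot-is-NULL convention; names are illustrative, and the real ptr_ring (<linux/ptr_ring.h>) adds locking and batched consumption:

#include <stdio.h>
#include <stdlib.h>

struct ptr_fifo {
        void **queue;
        int size;
        int producer;
        int consumer;
};

static int fifo_init(struct ptr_fifo *r, int size)
{
        r->queue = calloc(size, sizeof(void *));
        if (!r->queue)
                return -1;
        r->size = size;
        r->producer = r->consumer = 0;
        return 0;
}

/* "Recycle": stash a pointer, failing when the ring is full. */
static int fifo_produce(struct ptr_fifo *r, void *ptr)
{
        if (r->queue[r->producer])
                return -1;              /* slot still occupied: ring full */
        r->queue[r->producer] = ptr;
        r->producer = (r->producer + 1) % r->size;
        return 0;
}

/* "Refill": take the oldest pointer back out, or NULL when empty. */
static void *fifo_consume(struct ptr_fifo *r)
{
        void *ptr = r->queue[r->consumer];

        if (ptr) {
                r->queue[r->consumer] = NULL;
                r->consumer = (r->consumer + 1) % r->size;
        }
        return ptr;
}

int main(void)
{
        struct ptr_fifo ring;
        char page_a[] = "page A", page_b[] = "page B";

        if (fifo_init(&ring, 4))
                return 1;
        fifo_produce(&ring, page_a);    /* recycle two "pages" */
        fifo_produce(&ring, page_b);
        printf("refill got: %s\n", (char *)fifo_consume(&ring)); /* page A */
        printf("refill got: %s\n", (char *)fifo_consume(&ring)); /* page B */
        free(ring.queue);
        return 0;
}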
/net/hsr/
Kconfig 17 For DANH, it must be connected as a node in a ring network together
19 device will be sent in both directions on the ring (over both slave
21 in the ring acts like a bridge for HSR frames, but filters frames
/net/ipv6/
seg6_hmac.c 167 char *ring, *off; in seg6_hmac_compute() local
191 ring = this_cpu_ptr(hmac_ring); in seg6_hmac_compute()
192 off = ring; in seg6_hmac_compute()
214 dgsize = __do_hmac(hinfo, ring, plen, tmp_out, in seg6_hmac_compute()
/net/sched/
sch_generic.c 733 if (!q->ring.queue) in pfifo_fast_reset()
800 if (!q->ring.queue) in pfifo_fast_destroy()
805 ptr_ring_cleanup(&q->ring, NULL); in pfifo_fast_destroy()