
Searched refs:rx_queue (Results 1 – 25 of 104) sorted by relevance

/kernel/linux/linux-5.10/drivers/net/ethernet/sfc/falcon/
rx.c
76 ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf) in ef4_rx_buf_next() argument
78 if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask))) in ef4_rx_buf_next()
79 return ef4_rx_buffer(rx_queue, 0); in ef4_rx_buf_next()
106 static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue) in ef4_reuse_page() argument
108 struct ef4_nic *efx = rx_queue->efx; in ef4_reuse_page()
113 if (unlikely(!rx_queue->page_ring)) in ef4_reuse_page()
115 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in ef4_reuse_page()
116 page = rx_queue->page_ring[index]; in ef4_reuse_page()
120 rx_queue->page_ring[index] = NULL; in ef4_reuse_page()
122 if (rx_queue->page_remove != rx_queue->page_add) in ef4_reuse_page()
[all …]
nic.h
110 ef4_rx_desc(struct ef4_rx_queue *rx_queue, unsigned int index) in ef4_rx_desc() argument
112 return ((ef4_qword_t *) (rx_queue->rxd.buf.addr)) + index; in ef4_rx_desc()
333 static inline int ef4_nic_probe_rx(struct ef4_rx_queue *rx_queue) in ef4_nic_probe_rx() argument
335 return rx_queue->efx->type->rx_probe(rx_queue); in ef4_nic_probe_rx()
337 static inline void ef4_nic_init_rx(struct ef4_rx_queue *rx_queue) in ef4_nic_init_rx() argument
339 rx_queue->efx->type->rx_init(rx_queue); in ef4_nic_init_rx()
341 static inline void ef4_nic_remove_rx(struct ef4_rx_queue *rx_queue) in ef4_nic_remove_rx() argument
343 rx_queue->efx->type->rx_remove(rx_queue); in ef4_nic_remove_rx()
345 static inline void ef4_nic_notify_rx_desc(struct ef4_rx_queue *rx_queue) in ef4_nic_notify_rx_desc() argument
347 rx_queue->efx->type->rx_write(rx_queue); in ef4_nic_notify_rx_desc()
[all …]
farch.c
471 ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index) in ef4_farch_build_rx_desc() argument
476 rxd = ef4_rx_desc(rx_queue, index); in ef4_farch_build_rx_desc()
477 rx_buf = ef4_rx_buffer(rx_queue, index); in ef4_farch_build_rx_desc()
481 rx_queue->efx->type->rx_buffer_padding, in ef4_farch_build_rx_desc()
489 void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue) in ef4_farch_rx_write() argument
491 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_rx_write()
495 while (rx_queue->notified_count != rx_queue->added_count) { in ef4_farch_rx_write()
497 rx_queue, in ef4_farch_rx_write()
498 rx_queue->notified_count & rx_queue->ptr_mask); in ef4_farch_rx_write()
499 ++rx_queue->notified_count; in ef4_farch_rx_write()
[all …]
net_driver.h
444 struct ef4_rx_queue rx_queue; member
1090 int (*rx_probe)(struct ef4_rx_queue *rx_queue);
1091 void (*rx_init)(struct ef4_rx_queue *rx_queue);
1092 void (*rx_remove)(struct ef4_rx_queue *rx_queue);
1093 void (*rx_write)(struct ef4_rx_queue *rx_queue);
1094 void (*rx_defer_refill)(struct ef4_rx_queue *rx_queue);
1236 return channel->rx_queue.core_index >= 0; in ef4_channel_has_rx_queue()
1243 return &channel->rx_queue; in ef4_channel_get_rx_queue()
1251 for (_rx_queue = &(_channel)->rx_queue; \
1256 ef4_rx_queue_channel(struct ef4_rx_queue *rx_queue) in ef4_rx_queue_channel() argument
[all …]
efx.h
40 int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
41 void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
42 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
43 void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
44 void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
47 void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
54 void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);
efx.c
253 struct ef4_rx_queue *rx_queue = in ef4_process_channel() local
257 ef4_fast_push_rx_descriptors(rx_queue, true); in ef4_process_channel()
428 struct ef4_rx_queue *rx_queue; in ef4_alloc_channel() local
447 rx_queue = &channel->rx_queue; in ef4_alloc_channel()
448 rx_queue->efx = efx; in ef4_alloc_channel()
449 timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0); in ef4_alloc_channel()
461 struct ef4_rx_queue *rx_queue; in ef4_copy_channel() local
485 rx_queue = &channel->rx_queue; in ef4_copy_channel()
486 rx_queue->buffer = NULL; in ef4_copy_channel()
487 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); in ef4_copy_channel()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/sfc/
rx_common.c
41 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) in efx_reuse_page() argument
43 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
48 if (unlikely(!rx_queue->page_ring)) in efx_reuse_page()
50 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
51 page = rx_queue->page_ring[index]; in efx_reuse_page()
55 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
57 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
58 ++rx_queue->page_remove; in efx_reuse_page()
62 ++rx_queue->page_recycle_count; in efx_reuse_page()
70 ++rx_queue->page_recycle_failed; in efx_reuse_page()
[all …]
ef100_rx.c
58 struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); in __ef100_rx_packet()
90 struct efx_rx_queue *rx_queue = in __ef100_rx_packet() local
95 efx_free_rx_buffers(rx_queue, rx_buf, 1); in __ef100_rx_packet()
105 static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index) in ef100_rx_packet() argument
107 struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index); in ef100_rx_packet()
108 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); in ef100_rx_packet()
109 struct efx_nic *efx = rx_queue->efx; in ef100_rx_packet()
111 ++rx_queue->rx_packets; in ef100_rx_packet()
115 efx_rx_queue_index(rx_queue), index); in ef100_rx_packet()
132 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_ef100_ev_rx() local
[all …]
rx.c
43 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, in efx_rx_packet__check_len() argument
47 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet__check_len()
61 efx_rx_queue_index(rx_queue), len, max_len); in efx_rx_packet__check_len()
63 efx_rx_queue_channel(rx_queue)->n_rx_overlength++; in efx_rx_packet__check_len()
106 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_mk_skb()
124 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, in efx_rx_packet() argument
127 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet()
128 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); in efx_rx_packet()
131 rx_queue->rx_packets++; in efx_rx_packet()
133 rx_buf = efx_rx_buffer(rx_queue, index); in efx_rx_packet()
[all …]
nic_common.h
100 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) in efx_rx_desc() argument
102 return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; in efx_rx_desc()
140 static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) in efx_nic_probe_rx() argument
142 return rx_queue->efx->type->rx_probe(rx_queue); in efx_nic_probe_rx()
144 static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) in efx_nic_init_rx() argument
146 rx_queue->efx->type->rx_init(rx_queue); in efx_nic_init_rx()
148 static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) in efx_nic_remove_rx() argument
150 rx_queue->efx->type->rx_remove(rx_queue); in efx_nic_remove_rx()
152 static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) in efx_nic_notify_rx_desc() argument
154 rx_queue->efx->type->rx_write(rx_queue); in efx_nic_notify_rx_desc()
[all …]
mcdi_functions.c
270 int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue) in efx_mcdi_rx_probe() argument
272 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, in efx_mcdi_rx_probe()
273 (rx_queue->ptr_mask + 1) * in efx_mcdi_rx_probe()
278 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) in efx_mcdi_rx_init() argument
280 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); in efx_mcdi_rx_init()
281 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; in efx_mcdi_rx_init()
283 struct efx_nic *efx = rx_queue->efx; in efx_mcdi_rx_init()
290 rx_queue->scatter_n = 0; in efx_mcdi_rx_init()
291 rx_queue->scatter_len = 0; in efx_mcdi_rx_init()
297 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); in efx_mcdi_rx_init()
[all …]
farch.c
464 efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) in efx_farch_build_rx_desc() argument
469 rxd = efx_rx_desc(rx_queue, index); in efx_farch_build_rx_desc()
470 rx_buf = efx_rx_buffer(rx_queue, index); in efx_farch_build_rx_desc()
474 rx_queue->efx->type->rx_buffer_padding, in efx_farch_build_rx_desc()
482 void efx_farch_rx_write(struct efx_rx_queue *rx_queue) in efx_farch_rx_write() argument
484 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_write()
488 while (rx_queue->notified_count != rx_queue->added_count) { in efx_farch_rx_write()
490 rx_queue, in efx_farch_rx_write()
491 rx_queue->notified_count & rx_queue->ptr_mask); in efx_farch_rx_write()
492 ++rx_queue->notified_count; in efx_farch_rx_write()
[all …]
rx_common.h
49 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
50 void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
51 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
52 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
53 void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
55 void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
69 void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
73 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
75 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
efx_channels.c
510 struct efx_rx_queue *rx_queue; in efx_alloc_channel() local
535 rx_queue = &channel->rx_queue; in efx_alloc_channel()
536 rx_queue->efx = efx; in efx_alloc_channel()
537 timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); in efx_alloc_channel()
580 struct efx_rx_queue *rx_queue; in efx_copy_channel() local
606 rx_queue = &channel->rx_queue; in efx_copy_channel()
607 rx_queue->buffer = NULL; in efx_copy_channel()
608 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); in efx_copy_channel()
609 timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); in efx_copy_channel()
620 struct efx_rx_queue *rx_queue; in efx_probe_channel() local
[all …]
mcdi_functions.h
25 int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
26 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
27 void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
28 void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
net_driver.h
548 struct efx_rx_queue rx_queue; member
1370 int (*rx_probe)(struct efx_rx_queue *rx_queue);
1371 void (*rx_init)(struct efx_rx_queue *rx_queue);
1372 void (*rx_remove)(struct efx_rx_queue *rx_queue);
1373 void (*rx_write)(struct efx_rx_queue *rx_queue);
1374 void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
1562 return channel->rx_queue.core_index >= 0; in efx_channel_has_rx_queue()
1569 return &channel->rx_queue; in efx_channel_get_rx_queue()
1577 for (_rx_queue = &(_channel)->rx_queue; \
1582 efx_rx_queue_channel(struct efx_rx_queue *rx_queue) in efx_rx_queue_channel() argument
[all …]
ef10.c
2541 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) in efx_ef10_build_rx_desc() argument
2546 rxd = efx_rx_desc(rx_queue, index); in efx_ef10_build_rx_desc()
2547 rx_buf = efx_rx_buffer(rx_queue, index); in efx_ef10_build_rx_desc()
2553 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) in efx_ef10_rx_write() argument
2555 struct efx_nic *efx = rx_queue->efx; in efx_ef10_rx_write()
2560 write_count = rx_queue->added_count & ~7; in efx_ef10_rx_write()
2561 if (rx_queue->notified_count == write_count) in efx_ef10_rx_write()
2566 rx_queue, in efx_ef10_rx_write()
2567 rx_queue->notified_count & rx_queue->ptr_mask); in efx_ef10_rx_write()
2568 while (++rx_queue->notified_count != write_count); in efx_ef10_rx_write()
[all …]
nic.h
321 int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
322 void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
323 void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
324 void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
325 void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
326 void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
/kernel/linux/linux-5.10/drivers/net/ethernet/freescale/
gianfar.c
110 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, in gfar_init_rxbdp() argument
118 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) in gfar_init_rxbdp()
140 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); in gfar_init_tx_rx_base()
153 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | in gfar_init_rqprm()
259 if (likely(priv->rx_queue[i]->rxcoalescing)) in gfar_configure_coalescing()
260 gfar_write(baddr + i, priv->rx_queue[i]->rxic); in gfar_configure_coalescing()
271 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
272 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
289 rx_packets += priv->rx_queue[i]->stats.rx_packets; in gfar_get_stats()
290 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; in gfar_get_stats()
[all …]
gianfar_ethtool.c
249 struct gfar_priv_rx_q *rx_queue = NULL; in gfar_gcoalesce() local
262 rx_queue = priv->rx_queue[0]; in gfar_gcoalesce()
265 rxtime = get_ictt_value(rx_queue->rxic); in gfar_gcoalesce()
266 rxcount = get_icft_value(rx_queue->rxic); in gfar_gcoalesce()
327 priv->rx_queue[i]->rxcoalescing = 0; in gfar_scoalesce()
330 priv->rx_queue[i]->rxcoalescing = 1; in gfar_scoalesce()
334 priv->rx_queue[i]->rxic = mk_ic_value( in gfar_scoalesce()
375 struct gfar_priv_rx_q *rx_queue = NULL; in gfar_gringparam() local
378 rx_queue = priv->rx_queue[0]; in gfar_gringparam()
388 rvals->rx_pending = rx_queue->rx_ring_size; in gfar_gringparam()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/ibm/
ibmveth.c
105 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off); in ibmveth_rxq_flags()
116 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle; in ibmveth_rxq_pending_buffer()
136 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); in ibmveth_rxq_frame_length()
397 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; in ibmveth_rxq_get_buffer()
410 u32 q_index = adapter->rx_queue.index; in ibmveth_rxq_recycle_buffer()
411 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; in ibmveth_rxq_recycle_buffer()
436 …ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].cor… in ibmveth_rxq_recycle_buffer()
440 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { in ibmveth_rxq_recycle_buffer()
441 adapter->rx_queue.index = 0; in ibmveth_rxq_recycle_buffer()
442 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; in ibmveth_rxq_recycle_buffer()
[all …]
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/
bpf_iter_tcp4.c
86 int rx_queue; in dump_tcp_sock() local
117 rx_queue = sp->sk_ack_backlog; in dump_tcp_sock()
119 rx_queue = tp->rcv_nxt - tp->copied_seq; in dump_tcp_sock()
120 if (rx_queue < 0) in dump_tcp_sock()
121 rx_queue = 0; in dump_tcp_sock()
128 tp->write_seq - tp->snd_una, rx_queue, in dump_tcp_sock()
bpf_iter_tcp6.c
86 int rx_queue; in dump_tcp6_sock() local
117 rx_queue = sp->sk_ack_backlog; in dump_tcp6_sock()
119 rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq; in dump_tcp6_sock()
120 if (rx_queue < 0) in dump_tcp6_sock()
121 rx_queue = 0; in dump_tcp6_sock()
132 tp->tcp.write_seq - tp->tcp.snd_una, rx_queue, in dump_tcp6_sock()
/kernel/linux/linux-5.10/drivers/net/xen-netback/
rx.c
90 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
98 if (skb_queue_empty(&queue->rx_queue)) in xenvif_rx_queue_tail()
101 __skb_queue_tail(&queue->rx_queue, skb); in xenvif_rx_queue_tail()
106 spin_unlock_irqrestore(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
115 spin_lock_irq(&queue->rx_queue.lock); in xenvif_rx_dequeue()
117 skb = __skb_dequeue(&queue->rx_queue); in xenvif_rx_dequeue()
119 xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue)); in xenvif_rx_dequeue()
130 spin_unlock_irq(&queue->rx_queue.lock); in xenvif_rx_dequeue()
148 skb = skb_peek(&queue->rx_queue); in xenvif_rx_queue_drop_expired()
551 skb = skb_peek(&queue->rx_queue); in xenvif_rx_queue_timeout()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/qlogic/qed/
qed_ll2.c
40 #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered)
423 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; in qed_ll2_handle_slowpath()
458 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; in qed_ll2_rxq_handle_completion()
500 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; in qed_ll2_rxq_completion()
563 p_rx = &p_ll2_conn->rx_queue; in qed_ll2_rxq_flush()
620 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; in qed_ll2_lb_rxq_handler()
937 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; in qed_sp_ll2_rx_queue_start()
1162 rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params); in qed_ll2_acquire_connection_rx()
1168 capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain); in qed_ll2_acquire_connection_rx()
1176 p_ll2_info->rx_queue.descq_array = p_descq; in qed_ll2_acquire_connection_rx()
[all …]
