Lines Matching refs:efx

65 static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)  in efx_rx_buf_hash()  argument
68 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); in efx_rx_buf_hash()
70 const u8 *data = eh + efx->rx_packet_hash_offset; in efx_rx_buf_hash()
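
The efx_rx_buf_hash() lines above read a little-endian 32-bit RSS hash that the NIC wrote into the receive prefix, at efx->rx_packet_hash_offset relative to the packet start; the driver has both an aligned and an unaligned variant. A minimal userspace sketch of the access pattern, using memcpy, an invented offset, and assuming a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read the little-endian u32 hash the NIC left at a fixed offset
     * in the RX prefix.  The offset used here is invented. */
    static uint32_t rx_buf_hash(const uint8_t *pkt, unsigned int hash_offset)
    {
            uint32_t hash;

            memcpy(&hash, pkt + hash_offset, sizeof(hash)); /* alignment-safe */
            return hash;    /* little-endian host assumed in this sketch */
    }

    int main(void)
    {
            uint8_t prefix[16] = { 0 };

            prefix[4] = 0x78; prefix[5] = 0x56; prefix[6] = 0x34; prefix[7] = 0x12;
            printf("rx hash = 0x%08x\n", rx_buf_hash(prefix, 4));
            return 0;
    }
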
87 static inline void efx_sync_rx_buffer(struct efx_nic *efx, in efx_sync_rx_buffer() argument
91 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, in efx_sync_rx_buffer()
95 void efx_rx_config_page_split(struct efx_nic *efx) in efx_rx_config_page_split() argument
97 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, in efx_rx_config_page_split()
99 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : in efx_rx_config_page_split()
101 efx->rx_page_buf_step); in efx_rx_config_page_split()
102 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / in efx_rx_config_page_split()
103 efx->rx_bufs_per_page; in efx_rx_config_page_split()
104 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, in efx_rx_config_page_split()
105 efx->rx_bufs_per_page); in efx_rx_config_page_split()
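
efx_rx_config_page_split() derives how the driver slices a page into receive buffers: the per-buffer step is rx_dma_len plus rx_ip_align rounded up to a cache-friendly alignment, bufs-per-page is how many steps fit in the page (forced to 1 for higher-order pages), truesize is the page split evenly across those buffers, and pages-per-batch rounds the preferred refill batch up to whole pages. A standalone sketch of that arithmetic with assumed input values (order-0 page, 64-byte alignment, batch of 8):

    #include <stdio.h>

    #define ALIGN_UP(x, a)          (((x) + (a) - 1) / (a) * (a))
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int page_size = 4096;          /* PAGE_SIZE, order-0 page assumed */
            unsigned int rx_dma_len = 1536;         /* assumed DMA length */
            unsigned int rx_ip_align = 2;           /* assumed NET_IP_ALIGN */
            unsigned int cacheline = 64;            /* assumed L1 cache line */
            unsigned int preferred_batch = 8;       /* EFX_RX_PREFERRED_BATCH-like value */

            unsigned int step = ALIGN_UP(rx_dma_len + rx_ip_align, cacheline);
            unsigned int bufs_per_page = page_size / step;
            unsigned int truesize = page_size / bufs_per_page;
            unsigned int pages_per_batch = DIV_ROUND_UP(preferred_batch,
                                                        bufs_per_page);

            printf("step=%u bufs/page=%u truesize=%u pages/batch=%u\n",
                   step, bufs_per_page, truesize, pages_per_batch);
            return 0;
    }
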
111 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page() local
132 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_reuse_page()
133 PAGE_SIZE << efx->rx_buffer_order, in efx_reuse_page()
154 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers() local
168 efx->rx_buffer_order); in efx_init_rx_buffers()
172 dma_map_page(&efx->pci_dev->dev, page, 0, in efx_init_rx_buffers()
173 PAGE_SIZE << efx->rx_buffer_order, in efx_init_rx_buffers()
175 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, in efx_init_rx_buffers()
177 __free_pages(page, efx->rx_buffer_order); in efx_init_rx_buffers()
193 rx_buf->dma_addr = dma_addr + efx->rx_ip_align; in efx_init_rx_buffers()
195 rx_buf->page_offset = page_offset + efx->rx_ip_align; in efx_init_rx_buffers()
196 rx_buf->len = efx->rx_dma_len; in efx_init_rx_buffers()
200 dma_addr += efx->rx_page_buf_step; in efx_init_rx_buffers()
201 page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
202 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
205 } while (++count < efx->rx_pages_per_batch); in efx_init_rx_buffers()
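
The efx_init_rx_buffers() fragments show one freshly mapped page being carved into buffers: each buffer's DMA address and page offset are bumped by rx_ip_align, its length is rx_dma_len, and the cursor advances by rx_page_buf_step until no further full buffer fits in the page. A hedged sketch of just that slicing loop, with stand-in types and no real DMA mapping (the driver also reserves per-page state at the start of the page, omitted here):

    #include <stdint.h>
    #include <stdio.h>

    struct rx_buf {
            uintptr_t dma_addr;
            unsigned int page_offset;
            unsigned int len;
    };

    /* Carve one page worth of address space into fixed-step RX buffers,
     * mirroring the do/while loop in efx_init_rx_buffers(). */
    static unsigned int slice_page(uintptr_t dma_base, unsigned int page_size,
                                   unsigned int step, unsigned int ip_align,
                                   unsigned int dma_len,
                                   struct rx_buf *bufs, unsigned int max_bufs)
    {
            unsigned int page_offset = 0, n = 0;

            do {
                    if (n == max_bufs)
                            break;
                    bufs[n].dma_addr = dma_base + page_offset + ip_align;
                    bufs[n].page_offset = page_offset + ip_align;
                    bufs[n].len = dma_len;
                    n++;
                    page_offset += step;
            } while (page_offset + step <= page_size);

            return n;
    }

    int main(void)
    {
            struct rx_buf bufs[8];
            unsigned int n = slice_page(0x100000, 4096, 1600, 2, 1536, bufs, 8);

            for (unsigned int i = 0; i < n; i++)
                    printf("buf %u: offset=%u len=%u\n",
                           i, bufs[i].page_offset, bufs[i].len);
            return 0;
    }
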
213 static void efx_unmap_rx_buffer(struct efx_nic *efx, in efx_unmap_rx_buffer() argument
220 dma_unmap_page(&efx->pci_dev->dev, in efx_unmap_rx_buffer()
222 PAGE_SIZE << efx->rx_buffer_order, in efx_unmap_rx_buffer()
249 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page() local
272 efx_unmap_rx_buffer(efx, rx_buf); in efx_recycle_rx_page()
285 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
329 struct efx_nic *efx = rx_queue->efx; in efx_fast_push_rx_descriptors() local
338 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_fast_push_rx_descriptors()
348 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_fast_push_rx_descriptors()
352 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
369 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
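
efx_fast_push_rx_descriptors() only refills when the fill level has dropped below the fast-fill trigger, and then adds buffers one batch (rx_pages_per_batch * rx_bufs_per_page) at a time until less than a batch of free space remains. A small sketch of that batch accounting with assumed ring parameters:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rxq_entries = 512;        /* assumed ring size */
            unsigned int fill_level = 200;         /* buffers currently posted */
            unsigned int fast_fill_trigger = 400;  /* assumed refill threshold */
            unsigned int pages_per_batch = 4, bufs_per_page = 2;
            unsigned int batch_size = pages_per_batch * bufs_per_page;
            unsigned int max_fill = rxq_entries - 16; /* assumed descriptor headroom */
            unsigned int space, filled = 0;

            if (fill_level >= fast_fill_trigger) {
                    printf("queue full enough, no refill\n");
                    return 0;
            }

            /* Refill one whole batch at a time until less than a batch
             * of free space remains, as the driver's loop does. */
            space = max_fill - fill_level;
            while (space >= batch_size) {
                    filled += batch_size;
                    space -= batch_size;
            }
            printf("refilled %u buffers in batches of %u\n", filled, batch_size);
            return 0;
    }
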
392 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet__check_len() local
393 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; in efx_rx_packet__check_len()
403 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { in efx_rx_packet__check_len()
405 netif_err(efx, rx_err, efx->net_dev, in efx_rx_packet__check_len()
409 efx->type->rx_buffer_padding); in efx_rx_packet__check_len()
410 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); in efx_rx_packet__check_len()
413 netif_err(efx, rx_err, efx->net_dev, in efx_rx_packet__check_len()
431 struct efx_nic *efx = channel->efx; in efx_rx_packet_gro() local
443 if (efx->net_dev->features & NETIF_F_RXHASH) in efx_rx_packet_gro()
444 skb_set_hash(skb, efx_rx_buf_hash(efx, eh), in efx_rx_packet_gro()
462 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_rx_packet_gro()
478 struct efx_nic *efx = channel->efx; in efx_rx_mk_skb() local
482 skb = netdev_alloc_skb(efx->net_dev, in efx_rx_mk_skb()
483 efx->rx_ip_align + efx->rx_prefix_size + in efx_rx_mk_skb()
486 atomic_inc(&efx->n_rx_noskb_drops); in efx_rx_mk_skb()
492 memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, in efx_rx_mk_skb()
493 efx->rx_prefix_size + hdr_len); in efx_rx_mk_skb()
494 skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); in efx_rx_mk_skb()
515 __free_pages(rx_buf->page, efx->rx_buffer_order); in efx_rx_mk_skb()
520 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_rx_mk_skb()
523 skb->protocol = eth_type_trans(skb, efx->net_dev); in efx_rx_mk_skb()
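
efx_rx_mk_skb() allocates an skb with room for rx_ip_align plus the hardware prefix plus hdr_len, copies prefix and header in a single memcpy starting at eh - rx_prefix_size, then reserves rx_ip_align + rx_prefix_size so skb->data points at the Ethernet header. A plain-buffer sketch of that copy-and-reserve, with illustrative sizes and stand-in buffers in place of the skb and the RX page:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char dma_area[64];       /* stand-in for the RX page */
            unsigned char skb_data[64];       /* stand-in for skb headroom + data */
            unsigned int rx_prefix_size = 14; /* assumed hardware prefix length */
            unsigned int rx_ip_align = 2;     /* assumed NET_IP_ALIGN */
            unsigned int hdr_len = 14;        /* copy just the Ethernet header */
            unsigned char *eh = dma_area + rx_prefix_size; /* packet start */

            memset(dma_area, 0xab, sizeof(dma_area));

            /* Copy prefix + header in one go, as the driver does ... */
            memcpy(skb_data + rx_ip_align, eh - rx_prefix_size,
                   rx_prefix_size + hdr_len);

            /* ... then "reserve" past the alignment and prefix so the
             * logical data pointer sits on the Ethernet header. */
            unsigned char *data = skb_data + rx_ip_align + rx_prefix_size;

            printf("first header byte: 0x%02x\n", data[0]);
            return 0;
    }
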
533 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet() local
547 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || in efx_rx_packet()
548 unlikely(len > n_frags * efx->rx_dma_len) || in efx_rx_packet()
549 unlikely(!efx->rx_scatter)) { in efx_rx_packet()
557 netif_vdbg(efx, rx_status, efx->net_dev, in efx_rx_packet()
579 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); in efx_rx_packet()
586 rx_buf->page_offset += efx->rx_prefix_size; in efx_rx_packet()
587 rx_buf->len -= efx->rx_prefix_size; in efx_rx_packet()
599 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); in efx_rx_packet()
601 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; in efx_rx_packet()
602 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); in efx_rx_packet()
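
In the efx_rx_packet() lines, a scattered packet is sanity-checked against its fragment count: len must exceed what n_frags - 1 full buffers could hold but not exceed n_frags full buffers, every fragment except the last is synced for rx_dma_len bytes, and the last fragment carries the remainder. A sketch of that length arithmetic with invented numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rx_dma_len = 1536;  /* assumed per-buffer DMA length */
            unsigned int n_frags = 3;        /* assumed scatter fragment count */
            unsigned int len = 4000;         /* assumed total packet length */

            /* The driver rejects lengths that could not have produced
             * exactly n_frags fragments. */
            if (len <= (n_frags - 1) * rx_dma_len || len > n_frags * rx_dma_len) {
                    printf("inconsistent scatter: len=%u n_frags=%u\n",
                           len, n_frags);
                    return 1;
            }

            unsigned int last_frag_len = len - (n_frags - 1) * rx_dma_len;

            printf("frags 0..%u carry %u bytes each, last frag carries %u\n",
                   n_frags - 2, rx_dma_len, last_frag_len);
            return 0;
    }
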
652 struct efx_nic *efx = channel->efx; in __efx_rx_packet() local
662 (eh + efx->rx_packet_len_offset)); in __efx_rx_packet()
667 if (unlikely(efx->loopback_selftest)) { in __efx_rx_packet()
670 efx_loopback_rx_packet(efx, eh, rx_buf->len); in __efx_rx_packet()
677 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) in __efx_rx_packet()
691 struct efx_nic *efx = rx_queue->efx; in efx_probe_rx_queue() local
696 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); in efx_probe_rx_queue()
700 netif_dbg(efx, probe, efx->net_dev, in efx_probe_rx_queue()
702 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_probe_rx_queue()
720 static void efx_init_rx_recycle_ring(struct efx_nic *efx, in efx_init_rx_recycle_ring() argument
736 efx->rx_bufs_per_page); in efx_init_rx_recycle_ring()
744 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_queue() local
747 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_init_rx_queue()
755 efx_init_rx_recycle_ring(efx, rx_queue); in efx_init_rx_queue()
764 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; in efx_init_rx_queue()
766 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_init_rx_queue()
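
efx_init_rx_queue() sets the refill watermarks: max_fill keeps EFX_RXD_HEAD_ROOM descriptors free at the head of the ring, and the fast-fill trigger sits one full page batch below max_fill so a refill is only attempted when at least one whole batch fits. A sketch of those two values under assumed ring parameters (the headroom constant here is a stand-in):

    #include <stdio.h>

    int main(void)
    {
            unsigned int rxq_entries = 512;   /* assumed ring size */
            unsigned int head_room = 16;      /* EFX_RXD_HEAD_ROOM-like stand-in */
            unsigned int pages_per_batch = 4, bufs_per_page = 2;

            unsigned int max_fill = rxq_entries - head_room;
            unsigned int fast_fill_trigger =
                    max_fill - pages_per_batch * bufs_per_page;

            printf("max_fill=%u fast_fill_trigger=%u\n",
                   max_fill, fast_fill_trigger);
            return 0;
    }
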
786 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_queue() local
789 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_fini_rx_queue()
813 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_fini_rx_queue()
814 PAGE_SIZE << efx->rx_buffer_order, in efx_fini_rx_queue()
824 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_remove_rx_queue()
843 struct efx_nic *efx = netdev_priv(net_dev); in efx_filter_rfs() local
876 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, in efx_filter_rfs()
911 rc = efx->type->filter_rfs_insert(efx, &spec); in efx_filter_rfs()
916 efx->rps_flow_id[rc] = flow_id; in efx_filter_rfs()
917 channel = efx_get_channel(efx, skb_get_rx_queue(skb)); in efx_filter_rfs()
921 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs()
927 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs()
936 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) in __efx_filter_rfs_expire() argument
938 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); in __efx_filter_rfs_expire()
942 if (!spin_trylock_bh(&efx->filter_lock)) in __efx_filter_rfs_expire()
945 expire_one = efx->type->filter_rfs_expire_one; in __efx_filter_rfs_expire()
946 index = efx->rps_expire_index; in __efx_filter_rfs_expire()
947 size = efx->type->max_rx_ip_filters; in __efx_filter_rfs_expire()
949 flow_id = efx->rps_flow_id[index]; in __efx_filter_rfs_expire()
950 if (expire_one(efx, flow_id, index)) in __efx_filter_rfs_expire()
951 netif_info(efx, rx_status, efx->net_dev, in __efx_filter_rfs_expire()
957 efx->rps_expire_index = index; in __efx_filter_rfs_expire()
959 spin_unlock_bh(&efx->filter_lock); in __efx_filter_rfs_expire()
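
__efx_filter_rfs_expire() walks the accelerated-RFS flow table round-robin under the filter lock: it resumes at rps_expire_index, tests up to quota entries with the NIC type's filter_rfs_expire_one hook, wraps at max_rx_ip_filters, and stores the cursor for the next pass. A self-contained sketch of that cursor logic; the table, quota, and expiry test below are stand-ins, not the driver's data structures:

    #include <stdbool.h>
    #include <stdio.h>

    #define TABLE_SIZE 8    /* stand-in for efx->type->max_rx_ip_filters */

    static bool expire_one(unsigned int flow_id, unsigned int index)
    {
            /* Stand-in policy: expire every even-numbered flow. */
            (void)index;
            return (flow_id % 2) == 0;
    }

    /* Scan up to `quota` filter slots, resuming from *cursor and wrapping,
     * mirroring the round-robin walk in __efx_filter_rfs_expire(). */
    static void rfs_expire(unsigned int *flow_ids, unsigned int *cursor,
                           unsigned int quota)
    {
            unsigned int index = *cursor;

            while (quota--) {
                    unsigned int flow_id = flow_ids[index];

                    if (expire_one(flow_id, index))
                            printf("expired flow %u at slot %u\n", flow_id, index);
                    if (++index == TABLE_SIZE)
                            index = 0;
            }
            *cursor = index;        /* remember where the next call resumes */
    }

    int main(void)
    {
            unsigned int flow_ids[TABLE_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };
            unsigned int cursor = 6;

            rfs_expire(flow_ids, &cursor, 4);       /* scans slots 6, 7, 0, 1 */
            printf("next scan starts at slot %u\n", cursor);
            return 0;
    }
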