// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef100_rx.h"
#include "rx_common.h"
#include "efx.h"
#include "nic_common.h"
#include "mcdi_functions.h"
#include "ef100_regs.h"
#include "ef100_nic.h"
#include "io.h"

/* Get the value of a field in the RX prefix */
#define PREFIX_OFFSET_W(_f)	(ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
#define PREFIX_OFFSET_B(_f)	(ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
#define PREFIX_WIDTH_MASK(_f)	((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
#define PREFIX_WORD(_p, _f)	le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
#define PREFIX_FIELD(_p, _f)	((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
				 PREFIX_WIDTH_MASK(_f))

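/*
 * The HCLASS_NT_OR_INNER_L3_CLASS bits are defined relative to the CLASS
 * field, so build an absolute bit position and width for them here.  That
 * lets PREFIX_FIELD() read them straight out of the prefix like any other
 * field, e.g. PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS).
 */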
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN	\
		(ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH	\
		ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH

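/* Report whether the RX prefix says the RSS hash is valid */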
bool ef100_rx_buf_hash_valid(const u8 *prefix)
{
	return PREFIX_FIELD(prefix, RSS_HASH_VALID);
}

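/*
 * Check the L2 status field of the RX prefix.  Returns true if the frame
 * should be treated as bad; FCS errors are also counted in the channel's
 * n_rx_eth_crc_err statistic.
 */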
static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
{
	u16 rxclass;
	u8 l2status;

	rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
	l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);

	if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
		/* Everything is ok */
		return false;

	if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
		channel->n_rx_eth_crc_err++;
	return true;
}

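/*
 * Complete delivery of the packet recorded on the channel.  The RX prefix
 * sits immediately before the packet data: use it to drop frames with FCS
 * errors (unless NETIF_F_RXALL is set) or an implausibly short length, and
 * to pick up the frame checksum for RX checksum offload, before handing
 * the buffer to GRO.
 */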
void __ef100_rx_packet(struct efx_channel *channel)
{
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_va(rx_buf);
	__wsum csum = 0;
	u32 *prefix;

	prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);

	if (ef100_has_fcs_error(channel, prefix) &&
	    unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
		goto out;

	rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
	if (rx_buf->len <= sizeof(struct ethhdr)) {
		if (net_ratelimit())
			netif_err(channel->efx, rx_err, channel->efx->net_dev,
				  "RX packet too small (%d)\n", rx_buf->len);
		++channel->n_rx_frm_trunc;
		goto out;
	}

	if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
		if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
			++channel->n_rx_ip_hdr_chksum_err;
		} else {
			u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));

			csum = (__force __wsum) sum;
		}
	}

	if (channel->type->receive_skb) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		/* no support for special channels yet, so just discard */
		WARN_ON_ONCE(1);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		goto out;
	}

	efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);

out:
	channel->rx_pkt_n_frags = 0;
}

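/*
 * First stage of handling a received descriptor: sync the DMA buffer, step
 * the buffer past the RX prefix and recycle the page.  Any previously
 * pending packet is flushed, and this one is recorded on the channel so
 * that __ef100_rx_packet() delivers it on the next flush.
 */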
static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;

	++rx_queue->rx_packets;

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x\n",
		   efx_rx_queue_index(rx_queue), index);

	efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);

	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;

	efx_recycle_rx_pages(channel, rx_buf, 1);

	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = 1;
	channel->rx_pkt_index = index;
}

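/*
 * Handle an RX completion event.  A single event can report several
 * received packets; pull each one off the ring in order.
 */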
void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	unsigned int n_packets =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
	int i;

	WARN_ON_ONCE(!n_packets);
	if (n_packets > 1)
		++channel->n_rx_merge_events;

	channel->irq_mod_score += 2 * n_packets;

	for (i = 0; i < n_packets; ++i) {
		ef100_rx_packet(rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask);
		++rx_queue->removed_count;
	}
}

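/*
 * Hand newly added RX buffers to the hardware: write a descriptor for each
 * buffer between notified_count and added_count, then ring the RX doorbell
 * with the new producer index.
 */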
void ef100_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	unsigned int idx;
	efx_qword_t *rxd;
	efx_dword_t rxdb;

	while (rx_queue->notified_count != rx_queue->added_count) {
		idx = rx_queue->notified_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, idx);
		rxd = efx_rx_desc(rx_queue, idx);

		EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);

		++rx_queue->notified_count;
	}

	wmb();
	EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
			     rx_queue->added_count & rx_queue->ptr_mask);
	efx_writed_page(rx_queue->efx, &rxdb,
			ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
}