// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Packet receive logic for Mellanox Gigabit Ethernet driver
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 data;

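	/* Set the multicast-enable bit in the RX MAC filter general register */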
	data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
	data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
	writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
}

void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 data;

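	/* Clear the multicast-enable bit in the RX MAC filter general register */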
	data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
	data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
	writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
}

void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
				     unsigned int index)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Enable MAC receive filter mask for specified index */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
	writeq(control, base + MLXBF_GIGE_CONTROL);
}

void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
				      unsigned int index)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Disable MAC receive filter mask for specified index */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
	writeq(control, base + MLXBF_GIGE_CONTROL);
}

void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 dmac)
{
	void __iomem *base = priv->base;

	/* Write destination MAC to specified MAC RX filter */
	writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
	       (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}

void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
				  unsigned int index, u64 *dmac)
{
	void __iomem *base = priv->base;

	/* Read destination MAC from specified MAC RX filter */
	*dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
		      (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}

void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 control;
	u64 end_mac;

	/* Enable MAC_ID_RANGE match functionality */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
	writeq(control, base + MLXBF_GIGE_CONTROL);

	/* Set start of destination MAC range check to 0 */
	writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);

	/* Set end of destination MAC range check to all FFs */
	end_mac = BCAST_MAC_ADDR;
	writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
}

void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
{
	void __iomem *base = priv->base;
	u64 control;

	/* Disable MAC_ID_RANGE match functionality */
	control = readq(base + MLXBF_GIGE_CONTROL);
	control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
	writeq(control, base + MLXBF_GIGE_CONTROL);

	/* NOTE: no need to change DMAC_RANGE_START or END;
	 * those values are ignored since MAC_ID_RANGE_EN=0
	 */
}
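
/* A hedged sketch of how a .ndo_set_rx_mode handler could drive the two
 * promiscuous-mode helpers above (illustrative only; the real handler
 * lives outside this file):
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		mlxbf_gige_enable_promisc(priv);
 *	else
 *		mlxbf_gige_disable_promisc(priv);
 */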

/* Receive Initialization
 * 1) Configures RX MAC filters via MMIO registers
 * 2) Allocates RX WQE array using coherent DMA mapping
 * 3) Initializes each element of RX WQE array with a receive
 *    buffer pointer (also using coherent DMA mapping)
 * 4) Allocates RX CQE array using coherent DMA mapping
 * 5) Completes other misc receive initialization
 */
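/* A hedged sketch of the expected call pairing with mlxbf_gige_rx_deinit()
 * (the real callers live outside this file; error handling is elided):
 *
 *	err = mlxbf_gige_rx_init(priv);
 *	if (err)
 *		return err;
 *	...
 *	mlxbf_gige_rx_deinit(priv);
 */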
int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
{
	size_t wq_size, cq_size;
	dma_addr_t *rx_wqe_ptr;
	dma_addr_t rx_buf_dma;
	u64 data;
	int i, j;

	/* Configure MAC RX filter #0 to allow RX of broadcast pkts */
	mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
				     BCAST_MAC_ADDR);

	wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
	priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
					       &priv->rx_wqe_base_dma,
					       GFP_KERNEL);
	if (!priv->rx_wqe_base)
		return -ENOMEM;

	/* Initialize 'rx_wqe_ptr' to point to first RX WQE in array
	 * Each RX WQE is simply a receive buffer pointer, so walk
	 * the entire array, allocating a 2KB buffer for each element
	 */
	rx_wqe_ptr = priv->rx_wqe_base;

	for (i = 0; i < priv->rx_q_entries; i++) {
		priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
						       &rx_buf_dma, DMA_FROM_DEVICE);
		if (!priv->rx_skb[i])
			goto free_wqe_and_skb;
		*rx_wqe_ptr++ = rx_buf_dma;
	}

	/* Write RX WQE base address into MMIO reg */
	writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);

	cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
	priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
					       &priv->rx_cqe_base_dma,
					       GFP_KERNEL);
	if (!priv->rx_cqe_base)
		goto free_wqe_and_skb;

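	/* Pre-set the valid bit in each CQE.  mlxbf_gige_rx_packet() treats a
	 * CQE as a new completion only when this bit matches
	 * priv->valid_polarity, which is toggled each time the WQE producer
	 * index wraps around the ring; assuming valid_polarity starts out as
	 * zero, these freshly initialized entries are not mistaken for
	 * completions.
	 */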
	for (i = 0; i < priv->rx_q_entries; i++)
		priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;

	/* Write RX CQE base address into MMIO reg */
	writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);

	/* Write RX_WQE_PI with current number of replenished buffers */
	writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);

	/* Enable removal of CRC during RX */
	data = readq(priv->base + MLXBF_GIGE_RX);
	data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX);

	/* Enable RX MAC filter pass and discard counters */
	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);

	writeq(ilog2(priv->rx_q_entries),
	       priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);

	/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
	 * indicate readiness to receive interrupts
	 */
	data = readq(priv->base + MLXBF_GIGE_INT_MASK);
	data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
	writeq(data, priv->base + MLXBF_GIGE_INT_MASK);

	/* Enable RX DMA to write new packets to memory */
	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
	data |= MLXBF_GIGE_RX_DMA_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

	return 0;

free_wqe_and_skb:
	rx_wqe_ptr = priv->rx_wqe_base;
	for (j = 0; j < i; j++) {
		dma_unmap_single(priv->dev, *rx_wqe_ptr,
				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[j]);
		rx_wqe_ptr++;
	}
	dma_free_coherent(priv->dev, wq_size,
			  priv->rx_wqe_base, priv->rx_wqe_base_dma);
	return -ENOMEM;
}

/* Receive Deinitialization
 * This routine will free allocations done by mlxbf_gige_rx_init(),
 * namely the RX WQE and RX CQE arrays, as well as all RX buffers
 */
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
{
	dma_addr_t *rx_wqe_ptr;
	size_t size;
	u64 data;
	int i;

	/* Disable RX DMA to prevent packet transfers to memory */
	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
	data &= ~MLXBF_GIGE_RX_DMA_EN;
	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

	rx_wqe_ptr = priv->rx_wqe_base;

	for (i = 0; i < priv->rx_q_entries; i++) {
		dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[i]);
		rx_wqe_ptr++;
	}

	size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->rx_wqe_base, priv->rx_wqe_base_dma);

	size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
	dma_free_coherent(priv->dev, size,
			  priv->rx_cqe_base, priv->rx_cqe_base_dma);

	priv->rx_wqe_base = NULL;
	priv->rx_wqe_base_dma = 0;
	priv->rx_cqe_base = NULL;
	priv->rx_cqe_base_dma = 0;
	writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
	writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
}

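/* Process a single RX completion.  On a clean status the filled SKB is
 * handed up the stack and the ring slot is replenished with a freshly
 * allocated buffer; on error the matching error counter is bumped.
 * Increments *rx_pkts for each completion consumed and returns true if
 * more completions appear to be pending.
 */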
static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb = NULL, *rx_skb;
	u16 rx_pi_rem, rx_ci_rem;
	dma_addr_t *rx_wqe_addr;
	dma_addr_t rx_buf_dma;
	u64 *rx_cqe_addr;
	u64 datalen;
	u64 rx_cqe;
	u16 rx_ci;
	u16 rx_pi;

	/* Index into RX buffer array is rx_pi modulo the RX queue size */
	rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
	rx_pi_rem = rx_pi % priv->rx_q_entries;

	rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
	rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
	rx_cqe = *rx_cqe_addr;

	if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
		return false;

	if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
		/* Packet is OK, increment stats */
		datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += datalen;

		skb = priv->rx_skb[rx_pi_rem];

		/* Alloc another RX SKB for this same index */
		rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
					      &rx_buf_dma, DMA_FROM_DEVICE);
		if (!rx_skb)
			return false;
		priv->rx_skb[rx_pi_rem] = rx_skb;
		dma_unmap_single(priv->dev, *rx_wqe_addr,
				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);

		skb_put(skb, datalen);

		skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */

		skb->protocol = eth_type_trans(skb, netdev);

		*rx_wqe_addr = rx_buf_dma;
	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
		priv->stats.rx_mac_errors++;
	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
		priv->stats.rx_truncate_errors++;
	}

	/* Read the receive consumer index before replenishing so that this
	 * routine returns an accurate value even if a packet arrives in the
	 * just-replenished buffer before this routine exits.
	 */
	rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
	rx_ci_rem = rx_ci % priv->rx_q_entries;

	/* Let hardware know we've replenished one buffer */
	rx_pi++;

	/* Ensure completion of all writes before notifying HW of replenish */
	wmb();
	writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);

	(*rx_pkts)++;

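	/* If the producer index has wrapped around the ring, flip the CQE
	 * valid-bit polarity expected on the next pass
	 */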
	rx_pi_rem = rx_pi % priv->rx_q_entries;
	if (rx_pi_rem == 0)
		priv->valid_polarity ^= 1;

	if (skb)
		netif_receive_skb(skb);

	return rx_pi_rem != rx_ci_rem;
}

/* Driver poll() function called by NAPI infrastructure */
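/* A hedged sketch of how this poll routine would typically be registered
 * at probe time (illustrative only; the actual registration lives outside
 * this file, and older kernels also pass a weight argument such as
 * NAPI_POLL_WEIGHT to netif_napi_add()):
 *
 *	netif_napi_add(priv->netdev, &priv->napi, mlxbf_gige_poll);
 *	napi_enable(&priv->napi);
 */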
int mlxbf_gige_poll(struct napi_struct *napi, int budget)
{
	struct mlxbf_gige *priv;
	bool remaining_pkts;
	int work_done = 0;
	u64 data;

	priv = container_of(napi, struct mlxbf_gige, napi);

	mlxbf_gige_handle_tx_complete(priv);

	do {
		remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
	} while (remaining_pkts && work_done < budget);

	/* If amount of work done < budget, turn off NAPI polling
	 * via napi_complete_done(napi, work_done) and then
	 * re-enable interrupts.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
		 * indicate receive readiness
		 */
		data = readq(priv->base + MLXBF_GIGE_INT_MASK);
		data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
		writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
	}

	return work_done;
}