// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "mana.h"

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

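/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) when the hardware can
 * offload the checksum for this packet, or 0 when it cannot.
 */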
static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

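/* DMA-map the skb for transmission: the linear part goes into SGE 0 and each
 * page fragment into the following SGEs. The mappings are recorded in the
 * mana_skb_head stashed at skb->head so they can be unmapped on completion.
 */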
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;
	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	ash->dma_handle[0] = da;
	ash->size[0] = skb_headlen(skb);

	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
	tp->wqe_req.sgl[0].size = ash->size[0];

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);

		if (dma_mapping_error(dev, da))
			goto frag_err;

		ash->dma_handle[i + 1] = da;
		ash->size[i + 1] = skb_frag_size(frag);

		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
	}

	return 0;

frag_err:
	for (i = i - 1; i >= 0; i--)
		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
			       DMA_TO_DEVICE);

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	return -ENOMEM;
}

int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT)
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
	else
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);

	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
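		/* Seed the TCP checksum with the pseudo-header (length
		 * omitted) and zero the IP length fields so the hardware can
		 * fill in the per-segment values.
		 */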
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto free_sgl_ptr;
		}
	}

	if (mana_map_skb(skb, apc, &pkg))
		goto free_sgl_ptr;

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

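/* Aggregate the per-queue RX and TX counters into the rtnl_link_stats64
 * snapshot, using the u64_stats seqcount retry loop so readers see
 * consistent 64-bit values even on 32-bit systems.
 */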
static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

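/* Pick a TX queue from the RSS indirection table based on the skb's flow
 * hash, and cache the choice in the socket when one is attached.
 */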
static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
	.ndo_xdp_xmit		= mana_xdp_xmit,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

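/* Send a management request to the device over GDMA and verify that the
 * response carries the same device ID and activity ID as the request.
 */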
static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_pf_register_hw_vport(struct mana_port_context *apc)
{
	struct mana_register_hw_vport_resp resp = {};
	struct mana_register_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.attached_gfid = 1;
	req.is_pf_default_vport = 1;
	req.allow_all_ether_types = 1;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->port_handle = resp.hw_vport_handle;
	return 0;
}

static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
{
	struct mana_deregister_hw_vport_resp resp = {};
	struct mana_deregister_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.hw_vport_handle = apc->port_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_pf_register_filter(struct mana_port_context *apc)
{
	struct mana_register_filter_resp resp = {};
	struct mana_register_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->pf_filter_handle = resp.filter_handle;
	return 0;
}

static void mana_pf_deregister_filter(struct mana_port_context *apc)
{
	struct mana_deregister_filter_resp resp = {};
	struct mana_deregister_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.filter_handle = apc->pf_filter_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister filter: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));
	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
			  u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;
out:
	return err;
}

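/* Configure RX steering for the vPort. The RSS indirection table of RX
 * object handles is appended to the request buffer immediately after the
 * fixed-size header.
 */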
static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req *req = NULL;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	u32 req_buf_size;
	int err;

	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->vport = apc->port_handle;
	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab) {
		req_indir_tab = (mana_handle_t *)(req + 1);
		memcpy(req_indir_tab, apc->rxobj_table,
		       req->num_indir_entries * sizeof(mana_handle_t));
	}

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}
out:
	kfree(req);
	return err;
}

static int mana_create_wq_obj(struct mana_port_context *apc,
			      mana_handle_t vport,
			      u32 wq_type, struct mana_obj_spec *wq_spec,
			      struct mana_obj_spec *cq_spec,
			      mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}

static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
				mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}

static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	for (i = 0; i < gc->max_num_queues; i++) {
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}

static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* In case of any error, use sleep instead. */
		if (err)
			msleep(100);
	}
}

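/* Advance the work queue tail by num_units basic units once the
 * corresponding completions have been processed, reclaiming the space for
 * new WQEs.
 */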
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}

static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int i;

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}

static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			if (net_ratelimit())
				netdev_err(ndev, "TX: CQE error %d\n",
					   cqe_oob->cqe_hdr.cqe_type);

			break;

		default:
			/* If the CQE type is unknown, log an error,
			 * and still free the SKB, update tail, etc.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "TX: unknown CQE type %d\n",
					   cqe_oob->cqe_hdr.cqe_type);

			break;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, cq->budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);

	cq->work_done = pkt_transmitted;
}

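/* Repost the receive WQE for the current buffer slot and advance the ring's
 * buffer index.
 */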
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
				    &recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
				      struct xdp_buff *xdp)
{
	struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);

	if (!skb)
		return NULL;

	if (xdp->data_hard_start) {
		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
	} else {
		skb_reserve(skb, XDP_PACKET_HEADROOM);
		skb_put(skb, pkt_len);
	}

	return skb;
}

static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
			struct mana_rxq *rxq)
{
	struct mana_stats_rx *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct xdp_buff xdp = {};
	struct sk_buff *skb;
	u32 hash_value;
	u32 act;

	rxq->rx_cq.work_done++;
	napi = &rxq->rx_cq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);

	if (act == XDP_REDIRECT && !rxq->xdp_rc)
		return;

	if (act != XDP_PASS && act != XDP_TX)
		goto drop_xdp;

	skb = mana_build_skb(buf_va, pkt_len, &xdp);

	if (!skb)
		goto drop;

	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;

	if (act == XDP_TX)
		rx_stats->xdp_tx++;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		skb_set_queue_mapping(skb, rxq_idx);
		mana_xdp_tx(skb, ndev);
		return;
	}

	napi_gro_receive(napi, skb);

	return;

drop_xdp:
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

drop:
	WARN_ON_ONCE(rxq->xdp_save_page);
	rxq->xdp_save_page = virt_to_page(buf_va);

	++ndev->stats.rx_dropped;

	return;
}

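/* Handle one RX completion: map a replacement page for the ring slot, pass
 * the filled buffer up the stack, then repost the WQE. If no replacement
 * buffer can be allocated, the packet is dropped and the old buffer kept.
 */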
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct device *dev = gc->dev;
	void *new_buf, *old_buf;
	struct page *new_page;
	u32 curr, pktlen;
	dma_addr_t da;

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		return;

	case CQE_RX_OBJECT_FENCE:
		complete(&rxq->fence_event);
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		return;
	}

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* data packets should never have packetlength of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_page) {
		new_page = rxq->xdp_save_page;
		rxq->xdp_save_page = NULL;
	} else {
		new_page = alloc_page(GFP_ATOMIC);
	}

	if (new_page) {
		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
				  DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			__free_page(new_page);
			new_page = NULL;
		}
	}

	new_buf = new_page ? page_to_virt(new_page) : NULL;

	if (new_buf) {
		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
			       DMA_FROM_DEVICE);

		old_buf = rxbuf_oob->buf_va;

		/* refresh the rxbuf_oob with the new page */
		rxbuf_oob->buf_va = new_buf;
		rxbuf_oob->buf_dma_addr = da;
		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
	} else {
		old_buf = NULL; /* drop the packet if no memory */
	}

	mana_rx_skb(old_buf, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	struct mana_rxq *rxq = cq->rxq;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	rxq->xdp_flush = false;

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* verify recv cqe references the right rxq */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(rxq, cq, &comp[i]);
	}

	if (rxq->xdp_flush)
		xdp_do_flush();
}

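/* Process completions on this CQ and re-arm it only when the NAPI budget
 * was not exhausted, so polling continues while work remains.
 */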
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;
	u8 arm_bit;
	int w;

	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	w = cq->work_done;

	if (w < cq->budget &&
	    napi_complete_done(&cq->napi, w)) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	mana_gd_ring_cq(gdma_queue, arm_bit);

	return w;
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
	int w;

	cq->work_done = 0;
	cq->budget = budget;

	w = mana_cq_handler(cq, cq->gdma_cq);

	return min(w, budget);
}

static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;

	napi_schedule_irqoff(&cq->napi);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}

static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!txq->gdma_sq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
}

static void mana_destroy_txq(struct mana_port_context *apc)
{
	struct napi_struct *napi;
	int i;

	if (!apc->tx_qp)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		napi = &apc->tx_qp[i].tx_cq.napi;
		napi_synchronize(napi);
		napi_disable(napi);
		netif_napi_del(napi);

		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
	}

	kfree(apc->tx_qp);
	apc->tx_qp = NULL;
}

static int mana_create_txq(struct mana_port_context *apc,
			   struct net_device *net)
{
	struct mana_context *ac = apc->ac;
	struct gdma_dev *gd = ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct gdma_context *gc;
	struct mana_txq *txq;
	struct mana_cq *cq;
	u32 txq_size;
	u32 cq_size;
	int err;
	int i;

	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
			     GFP_KERNEL);
	if (!apc->tx_qp)
		return -ENOMEM;

	/*  The minimum size of the WQE is 32 bytes, hence
	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
	 *  the SQ can store. This value is then used to size other queues
	 *  to prevent overflow.
	 */
	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));

	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
	cq_size = PAGE_ALIGN(cq_size);

	gc = gd->gdma_context;

	for (i = 0; i < apc->num_queues; i++) {
		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

		/* Create SQ */
		txq = &apc->tx_qp[i].txq;

		u64_stats_init(&txq->stats.syncp);
		txq->ndev = net;
		txq->net_txq = netdev_get_tx_queue(net, i);
		txq->vp_offset = apc->tx_vp_offset;
		skb_queue_head_init(&txq->pending_skbs);

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_SQ;
		spec.monitor_avl_buf = true;
		spec.queue_size = txq_size;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
		if (err)
			goto out;

		/* Create SQ's CQ */
		cq = &apc->tx_qp[i].tx_cq;
		cq->type = MANA_CQ_TYPE_TX;

		cq->txq = txq;

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_CQ;
		spec.monitor_avl_buf = false;
		spec.queue_size = cq_size;
		spec.cq.callback = mana_schedule_napi;
		spec.cq.parent_eq = ac->eqs[i].eq;
		spec.cq.context = cq;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
		if (err)
			goto out;

		memset(&wq_spec, 0, sizeof(wq_spec));
		memset(&cq_spec, 0, sizeof(cq_spec));

		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
		wq_spec.queue_size = txq->gdma_sq->queue_size;

		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
		cq_spec.queue_size = cq->gdma_cq->queue_size;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
					 &wq_spec, &cq_spec,
					 &apc->tx_qp[i].tx_object);

		if (err)
			goto out;

		txq->gdma_sq->id = wq_spec.queue_index;
		cq->gdma_cq->id = cq_spec.queue_index;

		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;

		txq->gdma_txq_id = txq->gdma_sq->id;

		cq->gdma_id = cq->gdma_cq->id;

		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
			err = -EINVAL;
			goto out;
		}

		gc->cq_table[cq->gdma_id] = cq->gdma_cq;

		netif_napi_add_tx(net, &cq->napi, mana_poll);
		napi_enable(&cq->napi);

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}

static void mana_destroy_rxq(struct mana_port_context *apc,
			     struct mana_rxq *rxq, bool validate_state)

{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct napi_struct *napi;
	int i;

	if (!rxq)
		return;

	napi = &rxq->rx_cq.napi;

	if (validate_state)
		napi_synchronize(napi);

	napi_disable(napi);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	netif_napi_del(napi);

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

	if (rxq->xdp_save_page)
		__free_page(rxq->xdp_save_page);

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (!rx_oob->buf_va)
			continue;

		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
			       DMA_FROM_DEVICE);

		free_page((unsigned long)rx_oob->buf_va);
		rx_oob->buf_va = NULL;
	}

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	kfree(rxq);
}

#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16

static int mana_alloc_rx_wqe(struct mana_port_context *apc,
			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct page *page;
	dma_addr_t da;
	u32 buf_idx;

	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
				  DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			__free_page(page);
			return -ENOMEM;
		}

		rx_oob->buf_va = page_to_virt(page);
		rx_oob->buf_dma_addr = da;

		rx_oob->num_sge = 1;
		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
		rx_oob->sgl[0].size = rxq->datasize;
		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}

static int mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	u32 buf_idx;
	int err;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];

		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
					    &rx_oob->wqe_inf);
		if (err)
			return -ENOSPC;
	}

	return 0;
}

static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
					u32 rxq_idx, struct mana_eq *eq,
					struct net_device *ndev)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct mana_cq *cq = NULL;
	struct gdma_context *gc;
	u32 cq_size, rq_size;
	struct mana_rxq *rxq;
	int err;

	gc = gd->gdma_context;

	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
		      GFP_KERNEL);
	if (!rxq)
		return NULL;

	rxq->ndev = ndev;
	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
	rxq->rxq_idx = rxq_idx;
	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
	rxq->rxobj = INVALID_MANA_HANDLE;

	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
	if (err)
		goto out;

	rq_size = PAGE_ALIGN(rq_size);
	cq_size = PAGE_ALIGN(cq_size);

	/* Create RQ */
	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_RQ;
	spec.monitor_avl_buf = true;
	spec.queue_size = rq_size;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
	if (err)
		goto out;

	/* Create RQ's CQ */
	cq = &rxq->rx_cq;
	cq->type = MANA_CQ_TYPE_RX;
	cq->rxq = rxq;

	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = cq_size;
	spec.cq.callback = mana_schedule_napi;
	spec.cq.parent_eq = eq->eq;
	spec.cq.context = cq;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
	if (err)
		goto out;

	memset(&wq_spec, 0, sizeof(wq_spec));
	memset(&cq_spec, 0, sizeof(cq_spec));
	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
	wq_spec.queue_size = rxq->gdma_rq->queue_size;

	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
	cq_spec.queue_size = cq->gdma_cq->queue_size;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
				 &wq_spec, &cq_spec, &rxq->rxobj);
	if (err)
		goto out;

	rxq->gdma_rq->id = wq_spec.queue_index;
	cq->gdma_cq->id = cq_spec.queue_index;

	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;

	rxq->gdma_id = rxq->gdma_rq->id;
	cq->gdma_id = cq->gdma_cq->id;

	err = mana_push_wqe(rxq);
	if (err)
		goto out;

	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
		err = -EINVAL;
		goto out;
	}

	gc->cq_table[cq->gdma_id] = cq->gdma_cq;

	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);

	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
				 cq->napi.napi_id));
	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					   MEM_TYPE_PAGE_SHARED, NULL));

	napi_enable(&cq->napi);

	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
	if (!err)
		return rxq;

	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);

	mana_destroy_rxq(apc, rxq, false);

	if (cq)
		mana_deinit_cq(apc, cq);

	return NULL;
}

static int mana_add_rx_queues(struct mana_port_context *apc,
			      struct net_device *ndev)
{
	struct mana_context *ac = apc->ac;
	struct mana_rxq *rxq;
	int err = 0;
	int i;

	for (i = 0; i < apc->num_queues; i++) {
		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
		if (!rxq) {
			err = -ENOMEM;
			goto out;
		}

		u64_stats_init(&rxq->stats.syncp);

		apc->rxqs[i] = rxq;
	}

	apc->default_rxobj = apc->rxqs[0]->rxobj;
out:
	return err;
}

static void mana_destroy_vport(struct mana_port_context *apc)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_rxq *rxq;
	u32 rxq_idx;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		if (!rxq)
			continue;

		mana_destroy_rxq(apc, rxq, true);
		apc->rxqs[rxq_idx] = NULL;
	}

	mana_destroy_txq(apc);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_hw_vport(apc);
}

static int mana_create_vport(struct mana_port_context *apc,
			     struct net_device *net)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	apc->default_rxobj = INVALID_MANA_HANDLE;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_hw_vport(apc);
		if (err)
			return err;
	}

	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
	if (err)
		return err;

	return mana_create_txq(apc, net);
}

static void mana_rss_table_init(struct mana_port_context *apc)
{
	int i;

	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
		apc->indir_table[i] =
			ethtool_rxfh_indir_default(i, apc->num_queues);
}

int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab)
{
	u32 queue_idx;
	int err;
	int i;

	if (update_tab) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			queue_idx = apc->indir_table[i];
			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
		}
	}

	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
	if (err)
		return err;

	mana_fence_rqs(apc);

	return 0;
}

static int mana_init_port(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 max_txq, max_rxq, max_queues;
	int port_idx = apc->port_idx;
	u32 num_indirect_entries;
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
				   &num_indirect_entries);
	if (err) {
		netdev_err(ndev, "Failed to query info for vPort %d\n",
			   port_idx);
		goto reset_apc;
	}

	max_queues = min_t(u32, max_txq, max_rxq);
	if (apc->max_queues > max_queues)
		apc->max_queues = max_queues;

	if (apc->num_queues > apc->max_queues)
		apc->num_queues = apc->max_queues;

	eth_hw_addr_set(ndev, apc->mac_addr);

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
	return err;
}

int mana_alloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	err = mana_create_vport(apc, ndev);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	err = mana_add_rx_queues(apc, ndev);
	if (err)
		goto destroy_vport;

	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	mana_rss_table_init(apc);

	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
	if (err)
		goto destroy_vport;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_filter(apc);
		if (err)
			goto destroy_vport;
	}

	mana_chn_setxdp(apc, mana_xdp_get(apc));

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
	return err;
}

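/* Re-attach a port after a detach (e.g. across suspend/resume):
 * reinitialize the port context and, if the port was up before the
 * detach, reallocate its queues and restore the carrier state.
 * Must be called under the RTNL lock.
 */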
int mana_attach(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	err = mana_init_port(ndev);
	if (err)
		return err;

	if (apc->port_st_save) {
		err = mana_alloc_queues(ndev);
		if (err) {
			mana_cleanup_port_context(apc);
			return err;
		}
	}

	apc->port_is_up = apc->port_st_save;

	/* Ensure port state updated before txq state */
	smp_wmb();

	if (apc->port_is_up)
		netif_carrier_on(ndev);

	netif_device_attach(ndev);

	return 0;
}

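/* Tear down the data path of a port that has already been marked down:
 * remove any XDP program, drain in-flight TX packets with an exponential
 * backoff (starting at 1 ms per poll) under a 120-second deadline, free
 * the remaining pending skbs, disable RSS, and destroy the vPort.
 */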
static int mana_dealloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned long timeout = jiffies + 120 * HZ;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_txq *txq;
	struct sk_buff *skb;
	int i, err;
	u32 tsleep;

	if (apc->port_is_up)
		return -EINVAL;

	mana_chn_setxdp(apc, NULL);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_filter(apc);

	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not immediately see apc->port_is_up being
	 * cleared to false, but that doesn't matter since mana_start_xmit()
	 * drops any new packets while apc->port_is_up is false.
	 *
	 * Drain all the in-flight TX packets, using a single 120-second
	 * timeout for all the queues. The timeout breaks the while loop
	 * when the h/w is not responding; 120 seconds was chosen to cover
	 * the maximum number of queues.
	 */

	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;
		tsleep = 1000;
		while (atomic_read(&txq->pending_sends) > 0 &&
		       time_before(jiffies, timeout)) {
			usleep_range(tsleep, tsleep + 1000);
			tsleep <<= 1;
		}
		if (atomic_read(&txq->pending_sends)) {
			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
			if (err) {
				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
					   err, atomic_read(&txq->pending_sends),
					   txq->gdma_txq_id);
			}
			break;
		}
	}

	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;
		while ((skb = skb_dequeue(&txq->pending_skbs))) {
			mana_unmap_skb(skb, apc);
			dev_kfree_skb_any(skb);
		}
		atomic_set(&txq->pending_sends, 0);
	}
	/* At this point the queues can no longer be woken up, because
	 * mana_poll_tx_cq() cannot be running anymore.
	 */

	apc->rss_state = TRI_STATE_FALSE;
	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
	if (err) {
		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
		return err;
	}

	mana_destroy_vport(apc);

	return 0;
}

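/* Take a port down, remembering its previous state so mana_attach() can
 * restore it. When not called from ndo_stop (from_close == false), also
 * detach the net device and release the port context.
 * Must be called under the RTNL lock.
 */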
int mana_detach(struct net_device *ndev, bool from_close)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	apc->port_st_save = apc->port_is_up;
	apc->port_is_up = false;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	if (apc->port_st_save) {
		err = mana_dealloc_queues(ndev);
		if (err)
			return err;
	}

	if (!from_close) {
		netif_device_detach(ndev);
		mana_cleanup_port_context(apc);
	}

	return 0;
}

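/* Allocate and register a net device for one vPort: set up the netdev and
 * ethtool ops, a fixed MTU of ETH_DATA_LEN, the offload feature flags, and
 * a random RSS hash key, then initialize the port and register the netdev.
 */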
static int mana_probe_port(struct mana_context *ac, int port_idx,
			   struct net_device **ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
				 gc->max_num_queues);
	if (!ndev)
		return -ENOMEM;

	*ndev_storage = ndev;

	apc = netdev_priv(ndev);
	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = gc->max_num_queues;
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->pf_filter_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;

	ndev->netdev_ops = &mana_devops;
	ndev->ethtool_ops = &mana_ethtool_ops;
	ndev->mtu = ETH_DATA_LEN;
	ndev->max_mtu = ndev->mtu;
	ndev->min_mtu = ndev->mtu;
	ndev->needed_headroom = MANA_HEADROOM;
	SET_NETDEV_DEV(ndev, gc->dev);

	netif_carrier_off(ndev);

	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto free_net;

	netdev_lockdep_set_classes(ndev);

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features |= NETIF_F_RXCSUM;
	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	ndev->hw_features |= NETIF_F_RXHASH;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = 0;

	err = register_netdev(ndev);
	if (err) {
		netdev_err(ndev, "Unable to register netdev.\n");
		goto reset_apc;
	}

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
free_net:
	*ndev_storage = NULL;
	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	free_netdev(ndev);
	return err;
}

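/* Probe the MANA device: register it with the GDMA layer, create the EQs,
 * query the device configuration, and probe each vPort. On resume, the
 * vPort count must match what was seen before suspend, and the existing
 * net devices are re-attached instead of being recreated.
 */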
int mana_probe(struct gdma_dev *gd, bool resuming)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	u16 num_ports = 0;
	int err;
	int i;

	dev_info(dev,
		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

	if (!resuming) {
		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
		if (!ac)
			return -ENOMEM;

		ac->gdma_dev = gd;
		gd->driver_data = ac;
	}

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
				    MANA_MICRO_VERSION, &num_ports);
	if (err)
		goto out;

	if (!resuming) {
		ac->num_ports = num_ports;
	} else {
		if (ac->num_ports != num_ports) {
			dev_err(dev, "The number of vPorts changed: %d->%d\n",
				ac->num_ports, num_ports);
			err = -EPROTO;
			goto out;
		}
	}

	if (ac->num_ports == 0)
		dev_err(dev, "Failed to detect any vPort\n");

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	if (!resuming) {
		for (i = 0; i < ac->num_ports; i++) {
			err = mana_probe_port(ac, i, &ac->ports[i]);
			if (err)
				break;
		}
	} else {
		for (i = 0; i < ac->num_ports; i++) {
			rtnl_lock();
			err = mana_attach(ac->ports[i]);
			rtnl_unlock();
			if (err)
				break;
		}
	}
out:
	if (err)
		mana_remove(gd, false);

	return err;
}

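/* Remove the MANA device, detaching and (unless suspending) unregistering
 * each port's net device under the RTNL lock, then destroying the EQs and
 * deregistering from the GDMA layer. When suspending, the mana_context is
 * kept so that mana_probe(gd, true) can reuse it on resume.
 */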
void mana_remove(struct gdma_dev *gd, bool suspending)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	struct net_device *ndev;
	int err;
	int i;

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				dev_err(dev, "No net device to remove\n");
			goto out;
		}

		/* All cleanup actions should stay after rtnl_lock(); otherwise
		 * other functions may access partially cleaned up data.
		 */
		rtnl_lock();

		err = mana_detach(ndev, false);
		if (err)
			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
				   i, err);

		if (suspending) {
			/* No need to unregister the ndev. */
			rtnl_unlock();
			continue;
		}

		unregister_netdevice(ndev);

		rtnl_unlock();

		free_netdev(ndev);
	}

	mana_destroy_eq(ac);

out:
	mana_gd_deregister_device(gd);

	if (suspending)
		return;

	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	kfree(ac);
}