/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
				       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
	.ndo_open = lio_vf_rep_open,
	.ndo_stop = lio_vf_rep_stop,
	.ndo_start_xmit = lio_vf_rep_pkt_xmit,
	.ndo_tx_timeout = lio_vf_rep_tx_timeout,
	.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
	.ndo_get_stats64 = lio_vf_rep_get_stats64,
	.ndo_change_mtu = lio_vf_rep_change_mtu,
};

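/* Completion callback for representor control commands: unless the request
 * timed out, clear the response status to mark that a response arrived,
 * then wake up the thread waiting in lio_vf_rep_send_soft_command().
 */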
static void
lio_vf_rep_send_sc_complete(struct octeon_device *oct,
			    u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct lio_vf_rep_sc_ctx *ctx =
		(struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	struct lio_vf_rep_resp *resp =
		(struct lio_vf_rep_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
		WRITE_ONCE(resp->status, 0);

	complete(&ctx->complete);
}

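/* Send a representor control request to the firmware as a soft command and
 * wait (up to twice the request timeout) for the completion callback.  The
 * response payload, if any, follows the lio_vf_rep_resp header and is copied
 * back into the caller's buffer.
 */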
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
			     void *req, int req_size,
			     void *resp, int resp_size)
{
	int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
	int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
	struct octeon_soft_command *sc = NULL;
	struct lio_vf_rep_resp *rep_resp;
	struct lio_vf_rep_sc_ctx *ctx;
	void *sc_req;
	int err;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, req_size,
					  tot_resp_size, ctx_size);
	if (!sc)
		return -ENOMEM;

	ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	memset(ctx, 0, ctx_size);
	init_completion(&ctx->complete);

	sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
	memcpy(sc_req, req, req_size);

	rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
	memset(rep_resp, 0, tot_resp_size);
	WRITE_ONCE(rep_resp->status, 1);

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
	sc->callback = lio_vf_rep_send_sc_complete;
	sc->callback_arg = sc;
	sc->wait_time = LIO_VF_REP_REQ_TMO_MS;

	err = octeon_send_soft_command(oct, sc);
	if (err == IQ_SEND_FAILED)
		goto free_buff;

	wait_for_completion_timeout(&ctx->complete,
				    msecs_to_jiffies
				    (2 * LIO_VF_REP_REQ_TMO_MS));
	err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
	if (err)
		dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");

	if (resp)
		memcpy(resp, (rep_resp + 1), resp_size);
free_buff:
	octeon_free_soft_command(oct, sc);

	return err;
}

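/* ndo_open: tell the firmware to bring the VF representor link up, then mark
 * the interface as running and enable the transmit queue.
 */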
static int
lio_vf_rep_open(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP open failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
				      LIO_IFSTATE_RUNNING));

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
}

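/* ndo_stop: the mirror image of lio_vf_rep_open(): ask the firmware to take
 * the representor link down, clear the running flag and stop transmission.
 */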
static int
lio_vf_rep_stop(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP dev stop failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
				      ~LIO_IFSTATE_RUNNING));

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	return 0;
}

static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}

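/* ndo_get_stats64: report the counters polled from the firmware.  TX and RX
 * are swapped because traffic sent by the VF is received on the representor
 * and vice versa.
 */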
static void
lio_vf_rep_get_stats64(struct net_device *dev,
		       struct rtnl_link_stats64 *stats64)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

	/* Swap tx and rx stats as VF rep is a switch port */
	stats64->tx_packets = vf_rep->stats.rx_packets;
	stats64->tx_bytes   = vf_rep->stats.rx_bytes;
	stats64->tx_dropped = vf_rep->stats.rx_dropped;

	stats64->rx_packets = vf_rep->stats.tx_packets;
	stats64->rx_bytes   = vf_rep->stats.tx_bytes;
	stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

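/* ndo_change_mtu: push the new MTU to the firmware (as a big-endian value)
 * and only update the netdev once the firmware has accepted it.
 */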
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Change MTU failed with err %d\n", ret);
		return -EIO;
	}

	ndev->mtu = new_mtu;

	return 0;
}

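/* ndo_get_phys_port_name: derive a "pf<N>vf<M>" name from the PF number and
 * the representor's interface index so user space can identify the port.
 */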
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
			  char *buf, size_t len)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct octeon_device *oct = vf_rep->oct;
	int ret;

	ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
		       vf_rep->ifidx - oct->pf_num * 64 - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

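/* Map a firmware interface index (ifidx) back to the representor netdev that
 * was registered for that VF.  Returns NULL if the index does not fall
 * inside this PF's range.
 */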
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
	int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
	int vfid_mask = max_vfs - 1;

	if (ifidx <= oct->pf_num * max_vfs ||
	    ifidx >= oct->pf_num * max_vfs + max_vfs)
		return NULL;

	/* ifidx 1-63 for PF0 VFs
	 * ifidx 65-127 for PF1 VFs
	 */
	vf_id = (ifidx & vfid_mask) - 1;

	return oct->vf_rep_list.ndev[vf_id];
}

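/* Build the receive skb from the DROQ buffer.  Small packets are copied
 * entirely into the linear area; larger ones get MIN_SKB_SIZE bytes copied
 * and the remainder attached as a page fragment to avoid a full copy.
 */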
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
		       struct sk_buff *skb,
		       int len)
{
	if (likely(len > MIN_SKB_SIZE)) {
		struct octeon_skb_page_info *pg_info;
		unsigned char *va;

		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
		if (pg_info->page) {
			va = page_address(pg_info->page) +
				pg_info->page_offset;
			memcpy(skb->data, va, MIN_SKB_SIZE);
			skb_put(skb, MIN_SKB_SIZE);
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				pg_info->page,
				pg_info->page_offset + MIN_SKB_SIZE,
				len - MIN_SKB_SIZE,
				LIO_RXBUFFER_SZ);
	} else {
		struct octeon_skb_page_info *pg_info =
			((struct octeon_skb_page_info *)(skb->cb));

		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
					pg_info->page_offset, len);
		skb_put(skb, len);
		put_page(pg_info->page);
	}
}

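/* Dispatch handler for OPCODE_NIC_VF_REP_PKT: packets transmitted by a VF
 * are delivered here and injected into the matching representor netdev via
 * netif_rx().  Packets are dropped if the representor is not running or if
 * the packet spans more than one receive buffer.
 */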
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *vf_ndev;
	struct octeon_device *oct;
	union octeon_rh *rh;
	struct sk_buff *skb;
	int i, ifidx;

	oct = lio_get_device(recv_pkt->octeon_id);
	if (!oct)
		goto free_buffers;

	skb = recv_pkt->buffer_ptr[0];
	rh = &recv_pkt->rh;
	ifidx = rh->r.ossp;

	vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
	if (!vf_ndev)
		goto free_buffers;

	vf_rep = netdev_priv(vf_ndev);
	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    recv_pkt->buffer_count > 1)
		goto free_buffers;

	skb->dev = vf_ndev;

	/* Multiple buffers are not used for vf_rep packets.
	 * So just buffer_size[0] is valid.
	 */
	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);

	octeon_free_recv_info(recv_info);

	return 0;

free_buffers:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);

	octeon_free_recv_info(recv_info);

	return 0;
}

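/* TX completion callback: unmap the DMA buffer, free the skb and the soft
 * command, and wake the representor queue if the parent instruction queue
 * has room again.
 */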
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
				u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct sk_buff *skb = sc->ctxptr;
	struct net_device *ndev = skb->dev;
	u32 iq_no;

	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
			 sc->datasize, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	iq_no = sc->iq_no;
	octeon_free_soft_command(oct, sc);

	if (octnet_iq_is_full(oct, iq_no))
		return;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

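/* ndo_start_xmit: forward a packet received on the representor to its VF by
 * queueing it on the parent PF netdev's instruction queue as a soft command
 * with opcode OPCODE_NIC_VF_REP_PKT.  Only linear skbs are handled; if the
 * parent queue is full the representor queue is stopped and NETDEV_TX_BUSY
 * is returned, while other failures drop the packet.
 */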
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct octeon_device *oct = vf_rep->oct;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_soft_command *sc;
	struct lio *parent_lio;
	int status;

	parent_lio = GET_LIO(parent_ndev);

	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    skb->len <= 0)
		goto xmit_failed;

	if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
		dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, 0, 0, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
		goto xmit_failed;
	}

	/* Multiple buffers are not used for vf_rep packets. */
	if (skb_shinfo(skb)->nr_frags != 0) {
		dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
		goto xmit_failed;
	}

	sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
				     skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
		dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
		goto xmit_failed;
	}

	sc->virtdptr = skb->data;
	sc->datasize = skb->len;
	sc->ctxptr = skb;
	sc->iq_no = parent_lio->txq;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
				    vf_rep->ifidx, 0, 0);
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
	pki_ih3->tagtype = ORDERED_TAG;

	sc->callback = lio_vf_rep_packet_sent_callback;
	sc->callback_arg = sc;

	status = octeon_send_soft_command(oct, sc);
	if (status == IQ_SEND_FAILED) {
		dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
				 sc->datasize, DMA_TO_DEVICE);
		goto xmit_failed;
	}

	if (status == IQ_SEND_STOP)
		netif_stop_queue(ndev);

	netif_trans_update(ndev);

	return NETDEV_TX_OK;

xmit_failed:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

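/* switchdev attribute handler: all representors of one NIC report the same
 * switch ID, derived from the parent PF's MAC address.
 */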
static int
lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct lio *lio = GET_LIO(parent_ndev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id,
				(void *)&lio->linfo.hw_addr + 2);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
	.switchdev_port_attr_get        = lio_vf_rep_attr_get,
};

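/* Delayed work that polls per-VF statistics from the firmware, byte-swaps
 * the 64-bit fields and caches them in vf_rep->stats, then reschedules
 * itself every LIO_VF_REP_STATS_POLL_TIME_MS milliseconds.
 */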
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
	struct lio_vf_rep_stats stats;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
	rep_cfg.ifidx = vf_rep->ifidx;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
					   &stats, sizeof(stats));

	if (!ret) {
		octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
		memcpy(&vf_rep->stats, &stats, sizeof(stats));
	}

	schedule_delayed_work(&vf_rep->stats_wk.work,
			      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

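/* Create one representor netdev per allocated VF when the eswitch is in
 * switchdev mode: assign a random MAC, register the netdev, start the stats
 * polling work and finally register the dispatch function that delivers VF
 * traffic to lio_vf_rep_pkt_recv().  On any failure everything created so
 * far is torn down again.
 */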
int
lio_vf_rep_create(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i, num_vfs;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!oct->sriov_info.sriov_enabled)
		return 0;

	num_vfs = oct->sriov_info.num_vfs_alloced;

	oct->vf_rep_list.num_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

		if (!ndev) {
			dev_err(&oct->pci_dev->dev,
				"VF rep device %d creation failed\n", i);
			goto cleanup;
		}

		ndev->min_mtu = LIO_MIN_MTU_SIZE;
		ndev->max_mtu = LIO_MAX_MTU_SIZE;
		ndev->netdev_ops = &lio_vf_rep_ndev_ops;
		SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);

		vf_rep = netdev_priv(ndev);
		memset(vf_rep, 0, sizeof(*vf_rep));

		vf_rep->ndev = ndev;
		vf_rep->oct = oct;
		vf_rep->parent_ndev = oct->props[0].netdev;
		vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

		eth_hw_addr_random(ndev);

		if (register_netdev(ndev)) {
			dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

			free_netdev(ndev);
			goto cleanup;
		}

		netif_carrier_off(ndev);

		INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
				  lio_vf_rep_fetch_stats);
		vf_rep->stats_wk.ctxptr = (void *)vf_rep;
		schedule_delayed_work(&vf_rep->stats_wk.work,
				      msecs_to_jiffies
				      (LIO_VF_REP_STATS_POLL_TIME_MS));
		oct->vf_rep_list.num_vfs++;
		oct->vf_rep_list.ndev[i] = ndev;
	}

	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
					OPCODE_NIC_VF_REP_PKT,
					lio_vf_rep_pkt_recv, oct)) {
		dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

		goto cleanup;
	}

	return 0;

cleanup:
	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;

	return -1;
}

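/* Undo lio_vf_rep_create(): cancel the stats work, quiesce and unregister
 * every representor netdev and release it.
 */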
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!oct->sriov_info.sriov_enabled)
		return;

	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			netif_tx_disable(ndev);
			netif_carrier_off(ndev);

			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;
}

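/* Netdevice notifier: when a representor netdev is registered or renamed,
 * push its current name to the firmware (LIO_VF_REP_REQ_DEVNAME) so the
 * firmware knows the interface name used on the host.
 */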
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lio_vf_rep_desc *vf_rep;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGENAME:
		break;

	default:
		return NOTIFY_DONE;
	}

	if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
		return NOTIFY_DONE;

	vf_rep = netdev_priv(ndev);
	oct = vf_rep->oct;

	if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
		dev_err(&oct->pci_dev->dev,
			"Device name change sync failed as the size is > %d\n",
			LIO_IF_NAME_SIZE);
		return NOTIFY_DONE;
	}

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
	rep_cfg.ifidx = vf_rep->ifidx;
	strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret)
		dev_err(&oct->pci_dev->dev,
			"vf_rep netdev name change failed with err %d\n", ret);

	return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
	.notifier_call = lio_vf_rep_netdev_event,
};

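/* Module-level hooks: the netdevice notifier above is registered once at
 * module load and removed again at module unload.
 */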
int
lio_vf_rep_modinit(void)
{
	if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
		pr_err("netdev notifier registration failed\n");
		return -EFAULT;
	}

	return 0;
}

void
lio_vf_rep_modexit(void)
{
	if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
		pr_err("netdev notifier unregister failed\n");
}