1 /*
2  * Copyright Gavin Shan, IBM Corporation 2016.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
16 
17 #include <net/ncsi.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/addrconf.h>
21 #include <net/ipv6.h>
22 #include <net/if_inet6.h>
23 
24 #include "internal.h"
25 #include "ncsi-pkt.h"
26 
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
29 
30 static inline int ncsi_filter_size(int table)
31 {
32 	int sizes[] = { 2, 6, 6, 6 };
33 
34 	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
36 		return -EINVAL;
37 
38 	return sizes[table];
39 }
40 
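/* Return a pointer to the entry at @index in the given filter table of
 * @nc, or NULL when the table hasn't been allocated or @table is not a
 * valid filter table identifier.
 */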
41 u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
42 {
43 	struct ncsi_channel_filter *ncf;
44 	int size;
45 
46 	ncf = nc->filters[table];
47 	if (!ncf)
48 		return NULL;
49 
50 	size = ncsi_filter_size(table);
51 	if (size < 0)
52 		return NULL;
53 
54 	return ncf->data + size * index;
55 }
56 
57 /* Find the first active filter in a filter table that matches the given
58  * data parameter. If data is NULL, this returns the first active filter.
59  */
60 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
61 {
62 	struct ncsi_channel_filter *ncf;
63 	void *bitmap;
64 	int index, size;
65 	unsigned long flags;
66 
67 	ncf = nc->filters[table];
68 	if (!ncf)
69 		return -ENXIO;
70 
71 	size = ncsi_filter_size(table);
72 	if (size < 0)
73 		return size;
74 
75 	spin_lock_irqsave(&nc->lock, flags);
76 	bitmap = (void *)&ncf->bitmap;
77 	index = -1;
78 	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
79 	       < ncf->total) {
80 		if (!data || !memcmp(ncf->data + size * index, data, size)) {
81 			spin_unlock_irqrestore(&nc->lock, flags);
82 			return index;
83 		}
84 	}
85 	spin_unlock_irqrestore(&nc->lock, flags);
86 
87 	return -ENOENT;
88 }
89 
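/* Add @data to the given filter table unless it is already present.
 * Returns the index of the existing or newly claimed entry, -ENODEV
 * when the table hasn't been allocated, or -ENOSPC when it is full.
 */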
90 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
91 {
92 	struct ncsi_channel_filter *ncf;
93 	int index, size;
94 	void *bitmap;
95 	unsigned long flags;
96 
97 	size = ncsi_filter_size(table);
98 	if (size < 0)
99 		return size;
100 
101 	index = ncsi_find_filter(nc, table, data);
102 	if (index >= 0)
103 		return index;
104 
105 	ncf = nc->filters[table];
106 	if (!ncf)
107 		return -ENODEV;
108 
109 	spin_lock_irqsave(&nc->lock, flags);
110 	bitmap = (void *)&ncf->bitmap;
111 	do {
112 		index = find_next_zero_bit(bitmap, ncf->total, 0);
113 		if (index >= ncf->total) {
114 			spin_unlock_irqrestore(&nc->lock, flags);
115 			return -ENOSPC;
116 		}
117 	} while (test_and_set_bit(index, bitmap));
118 
119 	memcpy(ncf->data + size * index, data, size);
120 	spin_unlock_irqrestore(&nc->lock, flags);
121 
122 	return index;
123 }
124 
125 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
126 {
127 	struct ncsi_channel_filter *ncf;
128 	int size;
129 	void *bitmap;
130 	unsigned long flags;
131 
132 	size = ncsi_filter_size(table);
133 	if (size < 0)
134 		return size;
135 
136 	ncf = nc->filters[table];
137 	if (!ncf || index >= ncf->total)
138 		return -ENODEV;
139 
140 	spin_lock_irqsave(&nc->lock, flags);
141 	bitmap = (void *)&ncf->bitmap;
142 	if (test_and_clear_bit(index, bitmap))
143 		memset(ncf->data + size * index, 0, size);
144 	spin_unlock_irqrestore(&nc->lock, flags);
145 
146 	return 0;
147 }
148 
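/* Recompute the aggregate link state from the active channels and pass
 * it to the registered handler; @force_down reports the link as down
 * unconditionally.
 */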
149 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
150 {
151 	struct ncsi_dev *nd = &ndp->ndev;
152 	struct ncsi_package *np;
153 	struct ncsi_channel *nc;
154 	unsigned long flags;
155 
156 	nd->state = ncsi_dev_state_functional;
157 	if (force_down) {
158 		nd->link_up = 0;
159 		goto report;
160 	}
161 
162 	nd->link_up = 0;
163 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
164 		NCSI_FOR_EACH_CHANNEL(np, nc) {
165 			spin_lock_irqsave(&nc->lock, flags);
166 
167 			if (!list_empty(&nc->link) ||
168 			    nc->state != NCSI_CHANNEL_ACTIVE) {
169 				spin_unlock_irqrestore(&nc->lock, flags);
170 				continue;
171 			}
172 
173 			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
174 				spin_unlock_irqrestore(&nc->lock, flags);
175 				nd->link_up = 1;
176 				goto report;
177 			}
178 
179 			spin_unlock_irqrestore(&nc->lock, flags);
180 		}
181 	}
182 
183 report:
184 	nd->handler(nd);
185 }
186 
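/* Per-channel monitor timer. It periodically issues a Get Link Status
 * (GLS) command; if the responses stop advancing the monitor state
 * before the wait window expires, the channel is assumed dead, the link
 * is reported down (unless hardware arbitration is in use) and the
 * channel is requeued for recovery.
 */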
187 static void ncsi_channel_monitor(unsigned long data)
188 {
189 	struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 	struct ncsi_package *np = nc->package;
191 	struct ncsi_dev_priv *ndp = np->ndp;
192 	struct ncsi_channel_mode *ncm;
193 	struct ncsi_cmd_arg nca;
194 	bool enabled, chained;
195 	unsigned int monitor_state;
196 	unsigned long flags;
197 	int state, ret;
198 
199 	spin_lock_irqsave(&nc->lock, flags);
200 	state = nc->state;
201 	chained = !list_empty(&nc->link);
202 	enabled = nc->monitor.enabled;
203 	monitor_state = nc->monitor.state;
204 	spin_unlock_irqrestore(&nc->lock, flags);
205 
206 	if (!enabled || chained) {
207 		ncsi_stop_channel_monitor(nc);
208 		return;
209 	}
210 	if (state != NCSI_CHANNEL_INACTIVE &&
211 	    state != NCSI_CHANNEL_ACTIVE) {
212 		ncsi_stop_channel_monitor(nc);
213 		return;
214 	}
215 
216 	switch (monitor_state) {
217 	case NCSI_CHANNEL_MONITOR_START:
218 	case NCSI_CHANNEL_MONITOR_RETRY:
219 		nca.ndp = ndp;
220 		nca.package = np->id;
221 		nca.channel = nc->id;
222 		nca.type = NCSI_PKT_CMD_GLS;
223 		nca.req_flags = 0;
224 		ret = ncsi_xmit_cmd(&nca);
225 		if (ret)
226 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
227 				   ret);
228 		break;
229 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
230 		break;
231 	default:
232 		if (!(ndp->flags & NCSI_DEV_HWA)) {
233 			ncsi_report_link(ndp, true);
234 			ndp->flags |= NCSI_DEV_RESHUFFLE;
235 		}
236 
237 		ncsi_stop_channel_monitor(nc);
238 
239 		ncm = &nc->modes[NCSI_MODE_LINK];
240 		spin_lock_irqsave(&nc->lock, flags);
241 		nc->state = NCSI_CHANNEL_INVISIBLE;
242 		ncm->data[2] &= ~0x1;
243 		spin_unlock_irqrestore(&nc->lock, flags);
244 
245 		spin_lock_irqsave(&ndp->lock, flags);
246 		nc->state = NCSI_CHANNEL_ACTIVE;
247 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
248 		spin_unlock_irqrestore(&ndp->lock, flags);
249 		ncsi_process_next_channel(ndp);
250 		return;
251 	}
252 
253 	spin_lock_irqsave(&nc->lock, flags);
254 	nc->monitor.state++;
255 	spin_unlock_irqrestore(&nc->lock, flags);
256 	mod_timer(&nc->monitor.timer, jiffies + HZ);
257 }
258 
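/* Enable the channel monitor and arm its timer to fire in one second;
 * ncsi_channel_monitor() keeps re-arming it until the monitor is
 * stopped.
 */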
259 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
260 {
261 	unsigned long flags;
262 
263 	spin_lock_irqsave(&nc->lock, flags);
264 	WARN_ON_ONCE(nc->monitor.enabled);
265 	nc->monitor.enabled = true;
266 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
267 	spin_unlock_irqrestore(&nc->lock, flags);
268 
269 	mod_timer(&nc->monitor.timer, jiffies + HZ);
270 }
271 
272 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
273 {
274 	unsigned long flags;
275 
276 	spin_lock_irqsave(&nc->lock, flags);
277 	if (!nc->monitor.enabled) {
278 		spin_unlock_irqrestore(&nc->lock, flags);
279 		return;
280 	}
281 	nc->monitor.enabled = false;
282 	spin_unlock_irqrestore(&nc->lock, flags);
283 
284 	del_timer_sync(&nc->monitor.timer);
285 }
286 
287 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
288 				       unsigned char id)
289 {
290 	struct ncsi_channel *nc;
291 
292 	NCSI_FOR_EACH_CHANNEL(np, nc) {
293 		if (nc->id == id)
294 			return nc;
295 	}
296 
297 	return NULL;
298 }
299 
300 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
301 {
302 	struct ncsi_channel *nc, *tmp;
303 	int index;
304 	unsigned long flags;
305 
306 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
307 	if (!nc)
308 		return NULL;
309 
310 	nc->id = id;
311 	nc->package = np;
312 	nc->state = NCSI_CHANNEL_INACTIVE;
313 	nc->monitor.enabled = false;
314 	setup_timer(&nc->monitor.timer,
315 		    ncsi_channel_monitor, (unsigned long)nc);
316 	spin_lock_init(&nc->lock);
317 	INIT_LIST_HEAD(&nc->link);
318 	for (index = 0; index < NCSI_CAP_MAX; index++)
319 		nc->caps[index].index = index;
320 	for (index = 0; index < NCSI_MODE_MAX; index++)
321 		nc->modes[index].index = index;
322 
323 	spin_lock_irqsave(&np->lock, flags);
324 	tmp = ncsi_find_channel(np, id);
325 	if (tmp) {
326 		spin_unlock_irqrestore(&np->lock, flags);
327 		kfree(nc);
328 		return tmp;
329 	}
330 
331 	list_add_tail_rcu(&nc->node, &np->channels);
332 	np->channel_num++;
333 	spin_unlock_irqrestore(&np->lock, flags);
334 
335 	return nc;
336 }
337 
338 static void ncsi_remove_channel(struct ncsi_channel *nc)
339 {
340 	struct ncsi_package *np = nc->package;
341 	struct ncsi_channel_filter *ncf;
342 	unsigned long flags;
343 	int i;
344 
345 	/* Release filters */
346 	spin_lock_irqsave(&nc->lock, flags);
347 	for (i = 0; i < NCSI_FILTER_MAX; i++) {
348 		ncf = nc->filters[i];
349 		if (!ncf)
350 			continue;
351 
352 		nc->filters[i] = NULL;
353 		kfree(ncf);
354 	}
355 
356 	nc->state = NCSI_CHANNEL_INACTIVE;
357 	spin_unlock_irqrestore(&nc->lock, flags);
358 	ncsi_stop_channel_monitor(nc);
359 
360 	/* Remove and free channel */
361 	spin_lock_irqsave(&np->lock, flags);
362 	list_del_rcu(&nc->node);
363 	np->channel_num--;
364 	spin_unlock_irqrestore(&np->lock, flags);
365 
366 	kfree(nc);
367 }
368 
369 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
370 				       unsigned char id)
371 {
372 	struct ncsi_package *np;
373 
374 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
375 		if (np->id == id)
376 			return np;
377 	}
378 
379 	return NULL;
380 }
381 
382 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
383 				      unsigned char id)
384 {
385 	struct ncsi_package *np, *tmp;
386 	unsigned long flags;
387 
388 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
389 	if (!np)
390 		return NULL;
391 
392 	np->id = id;
393 	np->ndp = ndp;
394 	spin_lock_init(&np->lock);
395 	INIT_LIST_HEAD(&np->channels);
396 
397 	spin_lock_irqsave(&ndp->lock, flags);
398 	tmp = ncsi_find_package(ndp, id);
399 	if (tmp) {
400 		spin_unlock_irqrestore(&ndp->lock, flags);
401 		kfree(np);
402 		return tmp;
403 	}
404 
405 	list_add_tail_rcu(&np->node, &ndp->packages);
406 	ndp->package_num++;
407 	spin_unlock_irqrestore(&ndp->lock, flags);
408 
409 	return np;
410 }
411 
412 void ncsi_remove_package(struct ncsi_package *np)
413 {
414 	struct ncsi_dev_priv *ndp = np->ndp;
415 	struct ncsi_channel *nc, *tmp;
416 	unsigned long flags;
417 
418 	/* Release all child channels */
419 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
420 		ncsi_remove_channel(nc);
421 
422 	/* Remove and free package */
423 	spin_lock_irqsave(&ndp->lock, flags);
424 	list_del_rcu(&np->node);
425 	ndp->package_num--;
426 	spin_unlock_irqrestore(&ndp->lock, flags);
427 
428 	kfree(np);
429 }
430 
431 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
432 				   unsigned char id,
433 				   struct ncsi_package **np,
434 				   struct ncsi_channel **nc)
435 {
436 	struct ncsi_package *p;
437 	struct ncsi_channel *c;
438 
439 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
440 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
441 
442 	if (np)
443 		*np = p;
444 	if (nc)
445 		*nc = c;
446 }
447 
448 /* For two consecutive NCSI commands, the packet IDs shouldn't
449  * be the same, or a response could be matched to the wrong
450  * command. So the available IDs are allocated in round-robin fashion.
451  */
452 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
453 					unsigned int req_flags)
454 {
455 	struct ncsi_request *nr = NULL;
456 	int i, limit = ARRAY_SIZE(ndp->requests);
457 	unsigned long flags;
458 
459 	/* Look for an available request slot, from the cursor up to the ceiling */
460 	spin_lock_irqsave(&ndp->lock, flags);
461 	for (i = ndp->request_id; i < limit; i++) {
462 		if (ndp->requests[i].used)
463 			continue;
464 
465 		nr = &ndp->requests[i];
466 		nr->used = true;
467 		nr->flags = req_flags;
468 		ndp->request_id = i + 1;
469 		goto found;
470 	}
471 
472 	/* Fall back to scanning from the starting cursor */
473 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
474 		if (ndp->requests[i].used)
475 			continue;
476 
477 		nr = &ndp->requests[i];
478 		nr->used = true;
479 		nr->flags = req_flags;
480 		ndp->request_id = i + 1;
481 		goto found;
482 	}
483 
484 found:
485 	spin_unlock_irqrestore(&ndp->lock, flags);
486 	return nr;
487 }
488 
489 void ncsi_free_request(struct ncsi_request *nr)
490 {
491 	struct ncsi_dev_priv *ndp = nr->ndp;
492 	struct sk_buff *cmd, *rsp;
493 	unsigned long flags;
494 	bool driven;
495 
496 	if (nr->enabled) {
497 		nr->enabled = false;
498 		del_timer_sync(&nr->timer);
499 	}
500 
501 	spin_lock_irqsave(&ndp->lock, flags);
502 	cmd = nr->cmd;
503 	rsp = nr->rsp;
504 	nr->cmd = NULL;
505 	nr->rsp = NULL;
506 	nr->used = false;
507 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
508 	spin_unlock_irqrestore(&ndp->lock, flags);
509 
510 	if (driven && cmd && --ndp->pending_req_num == 0)
511 		schedule_work(&ndp->work);
512 
513 	/* Release command and response */
514 	consume_skb(cmd);
515 	consume_skb(rsp);
516 }
517 
518 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
519 {
520 	struct ncsi_dev_priv *ndp;
521 
522 	NCSI_FOR_EACH_DEV(ndp) {
523 		if (ndp->ndev.dev == dev)
524 			return &ndp->ndev;
525 	}
526 
527 	return NULL;
528 }
529 
530 static void ncsi_request_timeout(unsigned long data)
531 {
532 	struct ncsi_request *nr = (struct ncsi_request *)data;
533 	struct ncsi_dev_priv *ndp = nr->ndp;
534 	unsigned long flags;
535 
536 	/* If the request already has an associated response,
537 	 * let the response handler release it.
538 	 */
539 	spin_lock_irqsave(&ndp->lock, flags);
540 	nr->enabled = false;
541 	if (nr->rsp || !nr->cmd) {
542 		spin_unlock_irqrestore(&ndp->lock, flags);
543 		return;
544 	}
545 	spin_unlock_irqrestore(&ndp->lock, flags);
546 
547 	/* Release the request */
548 	ncsi_free_request(nr);
549 }
550 
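/* Suspend state machine for the active channel: select its package,
 * optionally refresh the link status of every channel in the package
 * (when a fail-over reshuffle is pending), then issue Disable Channel
 * Network TX (DCNT), Disable Channel (DC) and Deselect Package (DP)
 * before marking the channel inactive.
 */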
551 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
552 {
553 	struct ncsi_dev *nd = &ndp->ndev;
554 	struct ncsi_package *np = ndp->active_package;
555 	struct ncsi_channel *nc = ndp->active_channel;
556 	struct ncsi_cmd_arg nca;
557 	unsigned long flags;
558 	int ret;
559 
560 	nca.ndp = ndp;
561 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
562 	switch (nd->state) {
563 	case ncsi_dev_state_suspend:
564 		nd->state = ncsi_dev_state_suspend_select;
565 		/* Fall through */
566 	case ncsi_dev_state_suspend_select:
567 		ndp->pending_req_num = 1;
568 
569 		nca.type = NCSI_PKT_CMD_SP;
570 		nca.package = np->id;
571 		nca.channel = NCSI_RESERVED_CHANNEL;
572 		if (ndp->flags & NCSI_DEV_HWA)
573 			nca.bytes[0] = 0;
574 		else
575 			nca.bytes[0] = 1;
576 
577 	/* Retrieve the latest link states of the channels in the
578 	 * current package when the active channel needs to fail over
579 	 * to another one, meaning we will possibly select another
580 	 * channel as the next active one. The channels' link states
581 	 * are the most important factor in that selection, so we need
582 	 * them to be accurate. Unfortunately, the link states of
583 	 * inactive channels can't be updated by LSC AENs in time.
584 	 */
585 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
586 			nd->state = ncsi_dev_state_suspend_gls;
587 		else
588 			nd->state = ncsi_dev_state_suspend_dcnt;
589 		ret = ncsi_xmit_cmd(&nca);
590 		if (ret)
591 			goto error;
592 
593 		break;
594 	case ncsi_dev_state_suspend_gls:
595 		ndp->pending_req_num = np->channel_num;
596 
597 		nca.type = NCSI_PKT_CMD_GLS;
598 		nca.package = np->id;
599 
600 		nd->state = ncsi_dev_state_suspend_dcnt;
601 		NCSI_FOR_EACH_CHANNEL(np, nc) {
602 			nca.channel = nc->id;
603 			ret = ncsi_xmit_cmd(&nca);
604 			if (ret)
605 				goto error;
606 		}
607 
608 		break;
609 	case ncsi_dev_state_suspend_dcnt:
610 		ndp->pending_req_num = 1;
611 
612 		nca.type = NCSI_PKT_CMD_DCNT;
613 		nca.package = np->id;
614 		nca.channel = nc->id;
615 
616 		nd->state = ncsi_dev_state_suspend_dc;
617 		ret = ncsi_xmit_cmd(&nca);
618 		if (ret)
619 			goto error;
620 
621 		break;
622 	case ncsi_dev_state_suspend_dc:
623 		ndp->pending_req_num = 1;
624 
625 		nca.type = NCSI_PKT_CMD_DC;
626 		nca.package = np->id;
627 		nca.channel = nc->id;
628 		nca.bytes[0] = 1;
629 
630 		nd->state = ncsi_dev_state_suspend_deselect;
631 		ret = ncsi_xmit_cmd(&nca);
632 		if (ret)
633 			goto error;
634 
635 		break;
636 	case ncsi_dev_state_suspend_deselect:
637 		ndp->pending_req_num = 1;
638 
639 		nca.type = NCSI_PKT_CMD_DP;
640 		nca.package = np->id;
641 		nca.channel = NCSI_RESERVED_CHANNEL;
642 
643 		nd->state = ncsi_dev_state_suspend_done;
644 		ret = ncsi_xmit_cmd(&nca);
645 		if (ret)
646 			goto error;
647 
648 		break;
649 	case ncsi_dev_state_suspend_done:
650 		spin_lock_irqsave(&nc->lock, flags);
651 		nc->state = NCSI_CHANNEL_INACTIVE;
652 		spin_unlock_irqrestore(&nc->lock, flags);
653 		ncsi_process_next_channel(ndp);
654 
655 		break;
656 	default:
657 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
658 			    nd->state);
659 	}
660 
661 	return;
662 error:
663 	nd->state = ncsi_dev_state_functional;
664 }
665 
666 /* Check the VLAN filter bitmap for a set filter, and construct a
667  * "Set VLAN Filter - Disable" packet if found.
668  */
669 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
670 			 struct ncsi_cmd_arg *nca)
671 {
672 	int index;
673 	u32 *data;
674 	u16 vid;
675 
676 	index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
677 	if (index < 0) {
678 		/* Filter table empty */
679 		return -1;
680 	}
681 
682 	data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
683 	if (!data) {
684 		netdev_err(ndp->ndev.dev,
685 			   "ncsi: failed to retrieve filter %d\n", index);
686 		/* Set the VLAN id to 0 - this will still disable the entry in
687 		 * the filter table, but we won't know what it was.
688 		 */
689 		vid = 0;
690 	} else {
691 		vid = *(u16 *)data;
692 	}
693 
694 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
695 		      "ncsi: removed vlan tag %u at index %d\n",
696 		      vid, index + 1);
697 	ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
698 
699 	nca->type = NCSI_PKT_CMD_SVF;
700 	nca->words[1] = vid;
701 	/* HW filter index starts at 1 */
702 	nca->bytes[6] = index + 1;
703 	nca->bytes[7] = 0x00;
704 	return 0;
705 }
706 
707 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
708  * packet.
709  */
710 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
711 		       struct ncsi_cmd_arg *nca)
712 {
713 	struct vlan_vid *vlan = NULL;
714 	int index = 0;
715 
716 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
717 		index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
718 		if (index < 0) {
719 			/* New tag to add */
720 			netdev_printk(KERN_DEBUG, ndp->ndev.dev,
721 				      "ncsi: new vlan id to set: %u\n",
722 				      vlan->vid);
723 			break;
724 		}
725 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
726 			      "vid %u already at filter pos %d\n",
727 			      vlan->vid, index);
728 	}
729 
730 	if (!vlan || index >= 0) {
731 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
732 			      "no vlan ids left to set\n");
733 		return -1;
734 	}
735 
736 	index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
737 	if (index < 0) {
738 		netdev_err(ndp->ndev.dev,
739 			   "Failed to add new VLAN tag, error %d\n", index);
740 		if (index == -ENOSPC)
741 			netdev_err(ndp->ndev.dev,
742 				   "Channel %u already has all VLAN filters set\n",
743 				   nc->id);
744 		return -1;
745 	}
746 
747 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
748 		      "ncsi: set vid %u in packet, index %u\n",
749 		      vlan->vid, index + 1);
750 	nca->type = NCSI_PKT_CMD_SVF;
751 	nca->words[1] = vlan->vid;
752 	/* HW filter index starts at 1 */
753 	nca->bytes[6] = index + 1;
754 	nca->bytes[7] = 0x01;
755 
756 	return 0;
757 }
758 
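/* Configuration state machine for the channel being brought up: select
 * its package, clear the channel's initial state, program the VLAN and
 * MAC filters, enable broadcast (and, with IPv6, global multicast)
 * filtering, enable network TX and the channel, enable AENs when
 * supported, and finish with a link status query.
 */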
759 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
760 {
761 	struct ncsi_dev *nd = &ndp->ndev;
762 	struct net_device *dev = nd->dev;
763 	struct ncsi_package *np = ndp->active_package;
764 	struct ncsi_channel *nc = ndp->active_channel;
765 	struct ncsi_channel *hot_nc = NULL;
766 	struct ncsi_cmd_arg nca;
767 	unsigned char index;
768 	unsigned long flags;
769 	int ret;
770 
771 	nca.ndp = ndp;
772 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
773 	switch (nd->state) {
774 	case ncsi_dev_state_config:
775 	case ncsi_dev_state_config_sp:
776 		ndp->pending_req_num = 1;
777 
778 		/* Select the specific package */
779 		nca.type = NCSI_PKT_CMD_SP;
780 		if (ndp->flags & NCSI_DEV_HWA)
781 			nca.bytes[0] = 0;
782 		else
783 			nca.bytes[0] = 1;
784 		nca.package = np->id;
785 		nca.channel = NCSI_RESERVED_CHANNEL;
786 		ret = ncsi_xmit_cmd(&nca);
787 		if (ret)
788 			goto error;
789 
790 		nd->state = ncsi_dev_state_config_cis;
791 		break;
792 	case ncsi_dev_state_config_cis:
793 		ndp->pending_req_num = 1;
794 
795 		/* Clear initial state */
796 		nca.type = NCSI_PKT_CMD_CIS;
797 		nca.package = np->id;
798 		nca.channel = nc->id;
799 		ret = ncsi_xmit_cmd(&nca);
800 		if (ret)
801 			goto error;
802 
803 		nd->state = ncsi_dev_state_config_clear_vids;
804 		break;
805 	case ncsi_dev_state_config_clear_vids:
806 	case ncsi_dev_state_config_svf:
807 	case ncsi_dev_state_config_ev:
808 	case ncsi_dev_state_config_sma:
809 	case ncsi_dev_state_config_ebf:
810 #if IS_ENABLED(CONFIG_IPV6)
811 	case ncsi_dev_state_config_egmf:
812 #endif
813 	case ncsi_dev_state_config_ecnt:
814 	case ncsi_dev_state_config_ec:
815 	case ncsi_dev_state_config_ae:
816 	case ncsi_dev_state_config_gls:
817 		ndp->pending_req_num = 1;
818 
819 		nca.package = np->id;
820 		nca.channel = nc->id;
821 
822 		/* Clear any active filters on the channel before setting */
823 		if (nd->state == ncsi_dev_state_config_clear_vids) {
824 			ret = clear_one_vid(ndp, nc, &nca);
825 			if (ret) {
826 				nd->state = ncsi_dev_state_config_svf;
827 				schedule_work(&ndp->work);
828 				break;
829 			}
830 			/* Repeat */
831 			nd->state = ncsi_dev_state_config_clear_vids;
832 		/* Add known VLAN tags to the filter */
833 		} else if (nd->state == ncsi_dev_state_config_svf) {
834 			ret = set_one_vid(ndp, nc, &nca);
835 			if (ret) {
836 				nd->state = ncsi_dev_state_config_ev;
837 				schedule_work(&ndp->work);
838 				break;
839 			}
840 			/* Repeat */
841 			nd->state = ncsi_dev_state_config_svf;
842 		/* Enable/Disable the VLAN filter */
843 		} else if (nd->state == ncsi_dev_state_config_ev) {
844 			if (list_empty(&ndp->vlan_vids)) {
845 				nca.type = NCSI_PKT_CMD_DV;
846 			} else {
847 				nca.type = NCSI_PKT_CMD_EV;
848 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
849 			}
850 			nd->state = ncsi_dev_state_config_sma;
851 		} else if (nd->state == ncsi_dev_state_config_sma) {
852 		/* Use the first entry in the unicast filter table. Note that
853 		 * the MAC filter table starts from entry 1 instead of
854 		 * 0.
855 		 */
856 			nca.type = NCSI_PKT_CMD_SMA;
857 			for (index = 0; index < 6; index++)
858 				nca.bytes[index] = dev->dev_addr[index];
859 			nca.bytes[6] = 0x1;
860 			nca.bytes[7] = 0x1;
861 			nd->state = ncsi_dev_state_config_ebf;
862 		} else if (nd->state == ncsi_dev_state_config_ebf) {
863 			nca.type = NCSI_PKT_CMD_EBF;
864 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
865 			nd->state = ncsi_dev_state_config_ecnt;
866 #if IS_ENABLED(CONFIG_IPV6)
867 			if (ndp->inet6_addr_num > 0 &&
868 			    (nc->caps[NCSI_CAP_GENERIC].cap &
869 			     NCSI_CAP_GENERIC_MC))
870 				nd->state = ncsi_dev_state_config_egmf;
871 			else
872 				nd->state = ncsi_dev_state_config_ecnt;
873 		} else if (nd->state == ncsi_dev_state_config_egmf) {
874 			nca.type = NCSI_PKT_CMD_EGMF;
875 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
876 			nd->state = ncsi_dev_state_config_ecnt;
877 #endif /* CONFIG_IPV6 */
878 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
879 			nca.type = NCSI_PKT_CMD_ECNT;
880 			nd->state = ncsi_dev_state_config_ec;
881 		} else if (nd->state == ncsi_dev_state_config_ec) {
882 			/* Enable AEN if it's supported */
883 			nca.type = NCSI_PKT_CMD_EC;
884 			nd->state = ncsi_dev_state_config_ae;
885 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
886 				nd->state = ncsi_dev_state_config_gls;
887 		} else if (nd->state == ncsi_dev_state_config_ae) {
888 			nca.type = NCSI_PKT_CMD_AE;
889 			nca.bytes[0] = 0;
890 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
891 			nd->state = ncsi_dev_state_config_gls;
892 		} else if (nd->state == ncsi_dev_state_config_gls) {
893 			nca.type = NCSI_PKT_CMD_GLS;
894 			nd->state = ncsi_dev_state_config_done;
895 		}
896 
897 		ret = ncsi_xmit_cmd(&nca);
898 		if (ret)
899 			goto error;
900 		break;
901 	case ncsi_dev_state_config_done:
902 		spin_lock_irqsave(&nc->lock, flags);
903 		if (nc->reconfigure_needed) {
904 			/* This channel's configuration has been updated
905 			 * part-way during the config state - start the
906 			 * channel configuration over
907 			 */
908 			nc->reconfigure_needed = false;
909 			nc->state = NCSI_CHANNEL_INACTIVE;
910 			spin_unlock_irqrestore(&nc->lock, flags);
911 
912 			spin_lock_irqsave(&ndp->lock, flags);
913 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
914 			spin_unlock_irqrestore(&ndp->lock, flags);
915 
916 			netdev_printk(KERN_DEBUG, dev,
917 				      "Dirty NCSI channel state reset\n");
918 			ncsi_process_next_channel(ndp);
919 			break;
920 		}
921 
922 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
923 			hot_nc = nc;
924 			nc->state = NCSI_CHANNEL_ACTIVE;
925 		} else {
926 			hot_nc = NULL;
927 			nc->state = NCSI_CHANNEL_INACTIVE;
928 		}
929 		spin_unlock_irqrestore(&nc->lock, flags);
930 
931 		/* Update the hot channel */
932 		spin_lock_irqsave(&ndp->lock, flags);
933 		ndp->hot_channel = hot_nc;
934 		spin_unlock_irqrestore(&ndp->lock, flags);
935 
936 		ncsi_start_channel_monitor(nc);
937 		ncsi_process_next_channel(ndp);
938 		break;
939 	default:
940 		netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
941 			    nd->state);
942 	}
943 
944 	return;
945 
946 error:
947 	ncsi_report_link(ndp, true);
948 }
949 
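/* Pick the channel to activate next: the first inactive channel with
 * link up wins, otherwise the previously "hot" channel, otherwise the
 * first inactive channel found. The chosen channel is queued for
 * configuration.
 */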
950 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
951 {
952 	struct ncsi_package *np;
953 	struct ncsi_channel *nc, *found, *hot_nc;
954 	struct ncsi_channel_mode *ncm;
955 	unsigned long flags;
956 
957 	spin_lock_irqsave(&ndp->lock, flags);
958 	hot_nc = ndp->hot_channel;
959 	spin_unlock_irqrestore(&ndp->lock, flags);
960 
961 	/* The search is done as soon as an inactive channel with
962 	 * link up is found.
963 	 */
964 	found = NULL;
965 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
966 		NCSI_FOR_EACH_CHANNEL(np, nc) {
967 			spin_lock_irqsave(&nc->lock, flags);
968 
969 			if (!list_empty(&nc->link) ||
970 			    nc->state != NCSI_CHANNEL_INACTIVE) {
971 				spin_unlock_irqrestore(&nc->lock, flags);
972 				continue;
973 			}
974 
975 			if (!found)
976 				found = nc;
977 
978 			if (nc == hot_nc)
979 				found = nc;
980 
981 			ncm = &nc->modes[NCSI_MODE_LINK];
982 			if (ncm->data[2] & 0x1) {
983 				spin_unlock_irqrestore(&nc->lock, flags);
984 				found = nc;
985 				goto out;
986 			}
987 
988 			spin_unlock_irqrestore(&nc->lock, flags);
989 		}
990 	}
991 
992 	if (!found) {
993 		ncsi_report_link(ndp, true);
994 		return -ENODEV;
995 	}
996 
997 out:
998 	spin_lock_irqsave(&ndp->lock, flags);
999 	list_add_tail_rcu(&found->link, &ndp->channel_queue);
1000 	spin_unlock_irqrestore(&ndp->lock, flags);
1001 
1002 	return ncsi_process_next_channel(ndp);
1003 }
1004 
1005 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1006 {
1007 	struct ncsi_package *np;
1008 	struct ncsi_channel *nc;
1009 	unsigned int cap;
1010 	bool has_channel = false;
1011 
1012 	/* Hardware arbitration is disabled if any channel doesn't
1013 	 * explicitly advertise support for it.
1014 	 */
1015 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1016 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1017 			has_channel = true;
1018 
1019 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1020 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1021 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1022 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1023 				ndp->flags &= ~NCSI_DEV_HWA;
1024 				return false;
1025 			}
1026 		}
1027 	}
1028 
1029 	if (has_channel) {
1030 		ndp->flags |= NCSI_DEV_HWA;
1031 		return true;
1032 	}
1033 
1034 	ndp->flags &= ~NCSI_DEV_HWA;
1035 	return false;
1036 }
1037 
1038 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
1039 {
1040 	struct ncsi_package *np;
1041 	struct ncsi_channel *nc;
1042 	unsigned long flags;
1043 
1044 	/* Move all available channels to processing queue */
1045 	spin_lock_irqsave(&ndp->lock, flags);
1046 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1047 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1048 			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
1049 				     !list_empty(&nc->link));
1050 			ncsi_stop_channel_monitor(nc);
1051 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1052 		}
1053 	}
1054 	spin_unlock_irqrestore(&ndp->lock, flags);
1055 
1056 	/* In the extreme case we may have no channels at all */
1057 	if (list_empty(&ndp->channel_queue)) {
1058 		ncsi_report_link(ndp, false);
1059 		return -ENOENT;
1060 	}
1061 
1062 	return ncsi_process_next_channel(ndp);
1063 }
1064 
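/* Probe state machine: deselect all possible packages, select and
 * deselect each of them to discover which ones respond, then walk every
 * discovered package clearing each channel's initial state and reading
 * its version, capabilities and link status. Once the enumeration is
 * complete, hardware arbitration is enabled or an active channel is
 * chosen.
 */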
1065 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1066 {
1067 	struct ncsi_dev *nd = &ndp->ndev;
1068 	struct ncsi_package *np;
1069 	struct ncsi_channel *nc;
1070 	struct ncsi_cmd_arg nca;
1071 	unsigned char index;
1072 	int ret;
1073 
1074 	nca.ndp = ndp;
1075 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1076 	switch (nd->state) {
1077 	case ncsi_dev_state_probe:
1078 		nd->state = ncsi_dev_state_probe_deselect;
1079 		/* Fall through */
1080 	case ncsi_dev_state_probe_deselect:
1081 		ndp->pending_req_num = 8;
1082 
1083 		/* Deselect all possible packages */
1084 		nca.type = NCSI_PKT_CMD_DP;
1085 		nca.channel = NCSI_RESERVED_CHANNEL;
1086 		for (index = 0; index < 8; index++) {
1087 			nca.package = index;
1088 			ret = ncsi_xmit_cmd(&nca);
1089 			if (ret)
1090 				goto error;
1091 		}
1092 
1093 		nd->state = ncsi_dev_state_probe_package;
1094 		break;
1095 	case ncsi_dev_state_probe_package:
1096 		ndp->pending_req_num = 16;
1097 
1098 		/* Select all possible packages */
1099 		nca.type = NCSI_PKT_CMD_SP;
1100 		nca.bytes[0] = 1;
1101 		nca.channel = NCSI_RESERVED_CHANNEL;
1102 		for (index = 0; index < 8; index++) {
1103 			nca.package = index;
1104 			ret = ncsi_xmit_cmd(&nca);
1105 			if (ret)
1106 				goto error;
1107 		}
1108 
1109 		/* Disable all possible packages */
1110 		nca.type = NCSI_PKT_CMD_DP;
1111 		for (index = 0; index < 8; index++) {
1112 			nca.package = index;
1113 			ret = ncsi_xmit_cmd(&nca);
1114 			if (ret)
1115 				goto error;
1116 		}
1117 
1118 		nd->state = ncsi_dev_state_probe_channel;
1119 		break;
1120 	case ncsi_dev_state_probe_channel:
1121 		if (!ndp->active_package)
1122 			ndp->active_package = list_first_or_null_rcu(
1123 				&ndp->packages, struct ncsi_package, node);
1124 		else if (list_is_last(&ndp->active_package->node,
1125 				      &ndp->packages))
1126 			ndp->active_package = NULL;
1127 		else
1128 			ndp->active_package = list_next_entry(
1129 				ndp->active_package, node);
1130 
1131 		/* All available packages and channels have been enumerated.
1132 		 * The enumeration happens only once, when the NCSI interface
1133 		 * is started, so we need to continue bringing up the
1134 		 * interface after the enumeration.
1135 		 *
1136 		 * We have to choose an active channel before configuring it.
1137 		 * Note that in extreme situations we may not have an active
1138 		 * channel at all.
1139 		 */
1140 		if (!ndp->active_package) {
1141 			ndp->flags |= NCSI_DEV_PROBED;
1142 			if (ncsi_check_hwa(ndp))
1143 				ncsi_enable_hwa(ndp);
1144 			else
1145 				ncsi_choose_active_channel(ndp);
1146 			return;
1147 		}
1148 
1149 		/* Select the active package */
1150 		ndp->pending_req_num = 1;
1151 		nca.type = NCSI_PKT_CMD_SP;
1152 		nca.bytes[0] = 1;
1153 		nca.package = ndp->active_package->id;
1154 		nca.channel = NCSI_RESERVED_CHANNEL;
1155 		ret = ncsi_xmit_cmd(&nca);
1156 		if (ret)
1157 			goto error;
1158 
1159 		nd->state = ncsi_dev_state_probe_cis;
1160 		break;
1161 	case ncsi_dev_state_probe_cis:
1162 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1163 
1164 		/* Clear initial state */
1165 		nca.type = NCSI_PKT_CMD_CIS;
1166 		nca.package = ndp->active_package->id;
1167 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1168 			nca.channel = index;
1169 			ret = ncsi_xmit_cmd(&nca);
1170 			if (ret)
1171 				goto error;
1172 		}
1173 
1174 		nd->state = ncsi_dev_state_probe_gvi;
1175 		break;
1176 	case ncsi_dev_state_probe_gvi:
1177 	case ncsi_dev_state_probe_gc:
1178 	case ncsi_dev_state_probe_gls:
1179 		np = ndp->active_package;
1180 		ndp->pending_req_num = np->channel_num;
1181 
1182 		/* Retrieve version, capability or link status */
1183 		if (nd->state == ncsi_dev_state_probe_gvi)
1184 			nca.type = NCSI_PKT_CMD_GVI;
1185 		else if (nd->state == ncsi_dev_state_probe_gc)
1186 			nca.type = NCSI_PKT_CMD_GC;
1187 		else
1188 			nca.type = NCSI_PKT_CMD_GLS;
1189 
1190 		nca.package = np->id;
1191 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1192 			nca.channel = nc->id;
1193 			ret = ncsi_xmit_cmd(&nca);
1194 			if (ret)
1195 				goto error;
1196 		}
1197 
1198 		if (nd->state == ncsi_dev_state_probe_gvi)
1199 			nd->state = ncsi_dev_state_probe_gc;
1200 		else if (nd->state == ncsi_dev_state_probe_gc)
1201 			nd->state = ncsi_dev_state_probe_gls;
1202 		else
1203 			nd->state = ncsi_dev_state_probe_dp;
1204 		break;
1205 	case ncsi_dev_state_probe_dp:
1206 		ndp->pending_req_num = 1;
1207 
1208 		/* Deselect the active package */
1209 		nca.type = NCSI_PKT_CMD_DP;
1210 		nca.package = ndp->active_package->id;
1211 		nca.channel = NCSI_RESERVED_CHANNEL;
1212 		ret = ncsi_xmit_cmd(&nca);
1213 		if (ret)
1214 			goto error;
1215 
1216 		/* Scan channels in next package */
1217 		nd->state = ncsi_dev_state_probe_channel;
1218 		break;
1219 	default:
1220 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1221 			    nd->state);
1222 	}
1223 
1224 	return;
1225 error:
1226 	ncsi_report_link(ndp, true);
1227 }
1228 
1229 static void ncsi_dev_work(struct work_struct *work)
1230 {
1231 	struct ncsi_dev_priv *ndp = container_of(work,
1232 			struct ncsi_dev_priv, work);
1233 	struct ncsi_dev *nd = &ndp->ndev;
1234 
1235 	switch (nd->state & ncsi_dev_state_major) {
1236 	case ncsi_dev_state_probe:
1237 		ncsi_probe_channel(ndp);
1238 		break;
1239 	case ncsi_dev_state_suspend:
1240 		ncsi_suspend_channel(ndp);
1241 		break;
1242 	case ncsi_dev_state_config:
1243 		ncsi_configure_channel(ndp);
1244 		break;
1245 	default:
1246 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1247 			    nd->state);
1248 	}
1249 }
1250 
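/* Take the next channel off the processing queue and drive it: a
 * previously inactive channel is configured, a previously active one is
 * suspended. With an empty queue, either a pending reshuffle is started
 * or the current link state is reported.
 */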
1251 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1252 {
1253 	struct ncsi_channel *nc;
1254 	int old_state;
1255 	unsigned long flags;
1256 
1257 	spin_lock_irqsave(&ndp->lock, flags);
1258 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1259 				    struct ncsi_channel, link);
1260 	if (!nc) {
1261 		spin_unlock_irqrestore(&ndp->lock, flags);
1262 		goto out;
1263 	}
1264 
1265 	list_del_init(&nc->link);
1266 	spin_unlock_irqrestore(&ndp->lock, flags);
1267 
1268 	spin_lock_irqsave(&nc->lock, flags);
1269 	old_state = nc->state;
1270 	nc->state = NCSI_CHANNEL_INVISIBLE;
1271 	spin_unlock_irqrestore(&nc->lock, flags);
1272 
1273 	ndp->active_channel = nc;
1274 	ndp->active_package = nc->package;
1275 
1276 	switch (old_state) {
1277 	case NCSI_CHANNEL_INACTIVE:
1278 		ndp->ndev.state = ncsi_dev_state_config;
1279 		ncsi_configure_channel(ndp);
1280 		break;
1281 	case NCSI_CHANNEL_ACTIVE:
1282 		ndp->ndev.state = ncsi_dev_state_suspend;
1283 		ncsi_suspend_channel(ndp);
1284 		break;
1285 	default:
1286 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1287 			   old_state, nc->package->id, nc->id);
1288 		ncsi_report_link(ndp, false);
1289 		return -EINVAL;
1290 	}
1291 
1292 	return 0;
1293 
1294 out:
1295 	ndp->active_channel = NULL;
1296 	ndp->active_package = NULL;
1297 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1298 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1299 		return ncsi_choose_active_channel(ndp);
1300 	}
1301 
1302 	ncsi_report_link(ndp, false);
1303 	return -ENODEV;
1304 }
1305 
1306 #if IS_ENABLED(CONFIG_IPV6)
1307 static int ncsi_inet6addr_event(struct notifier_block *this,
1308 				unsigned long event, void *data)
1309 {
1310 	struct inet6_ifaddr *ifa = data;
1311 	struct net_device *dev = ifa->idev->dev;
1312 	struct ncsi_dev *nd = ncsi_find_dev(dev);
1313 	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1314 	struct ncsi_package *np;
1315 	struct ncsi_channel *nc;
1316 	struct ncsi_cmd_arg nca;
1317 	bool action;
1318 	int ret;
1319 
1320 	if (!ndp || (ipv6_addr_type(&ifa->addr) &
1321 	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1322 		return NOTIFY_OK;
1323 
1324 	switch (event) {
1325 	case NETDEV_UP:
1326 		action = (++ndp->inet6_addr_num) == 1;
1327 		nca.type = NCSI_PKT_CMD_EGMF;
1328 		break;
1329 	case NETDEV_DOWN:
1330 		action = (--ndp->inet6_addr_num == 0);
1331 		nca.type = NCSI_PKT_CMD_DGMF;
1332 		break;
1333 	default:
1334 		return NOTIFY_OK;
1335 	}
1336 
1337 	/* We might not have an active package or channel yet. The
1338 	 * multicast filter required by IPv6 will be enabled once an
1339 	 * active package and channel have been chosen.
1340 	 */
1341 	np = ndp->active_package;
1342 	nc = ndp->active_channel;
1343 	if (!action || !np || !nc)
1344 		return NOTIFY_OK;
1345 
1346 	/* Nothing to enable or disable if the function isn't supported */
1347 	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1348 		return NOTIFY_OK;
1349 
1350 	nca.ndp = ndp;
1351 	nca.req_flags = 0;
1352 	nca.package = np->id;
1353 	nca.channel = nc->id;
1354 	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1355 	ret = ncsi_xmit_cmd(&nca);
1356 	if (ret) {
1357 		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1358 			    (event == NETDEV_UP) ? "enable" : "disable", ret);
1359 		return NOTIFY_DONE;
1360 	}
1361 
1362 	return NOTIFY_OK;
1363 }
1364 
1365 static struct notifier_block ncsi_inet6addr_notifier = {
1366 	.notifier_call = ncsi_inet6addr_event,
1367 };
1368 #endif /* CONFIG_IPV6 */
1369 
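/* Requeue all ACTIVE channels for reconfiguration, typically after the
 * VLAN filter list has changed. Channels that are being configured or
 * already queued are only marked reconfigure_needed. Returns the number
 * of channels requeued.
 */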
1370 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1371 {
1372 	struct ncsi_dev *nd = &ndp->ndev;
1373 	struct ncsi_channel *nc;
1374 	struct ncsi_package *np;
1375 	unsigned long flags;
1376 	unsigned int n = 0;
1377 
1378 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1379 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1380 			spin_lock_irqsave(&nc->lock, flags);
1381 
1382 			/* Channels may be busy; mark them dirty instead of
1383 			 * kicking if:
1384 			 * a) not ACTIVE (configured)
1385 			 * b) in the channel_queue (to be configured)
1386 			 * c) its ndev is in the config state
1387 			 */
1388 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1389 				if ((ndp->ndev.state & 0xff00) ==
1390 						ncsi_dev_state_config ||
1391 						!list_empty(&nc->link)) {
1392 					netdev_printk(KERN_DEBUG, nd->dev,
1393 						      "ncsi: channel %p marked dirty\n",
1394 						      nc);
1395 					nc->reconfigure_needed = true;
1396 				}
1397 				spin_unlock_irqrestore(&nc->lock, flags);
1398 				continue;
1399 			}
1400 
1401 			spin_unlock_irqrestore(&nc->lock, flags);
1402 
1403 			ncsi_stop_channel_monitor(nc);
1404 			spin_lock_irqsave(&nc->lock, flags);
1405 			nc->state = NCSI_CHANNEL_INACTIVE;
1406 			spin_unlock_irqrestore(&nc->lock, flags);
1407 
1408 			spin_lock_irqsave(&ndp->lock, flags);
1409 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1410 			spin_unlock_irqrestore(&ndp->lock, flags);
1411 
1412 			netdev_printk(KERN_DEBUG, nd->dev,
1413 				      "ncsi: kicked channel %p\n", nc);
1414 			n++;
1415 		}
1416 	}
1417 
1418 	return n;
1419 }
1420 
1421 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1422 {
1423 	struct ncsi_dev_priv *ndp;
1424 	unsigned int n_vids = 0;
1425 	struct vlan_vid *vlan;
1426 	struct ncsi_dev *nd;
1427 	bool found = false;
1428 
1429 	if (vid == 0)
1430 		return 0;
1431 
1432 	nd = ncsi_find_dev(dev);
1433 	if (!nd) {
1434 		netdev_warn(dev, "ncsi: No net_device?\n");
1435 		return 0;
1436 	}
1437 
1438 	ndp = TO_NCSI_DEV_PRIV(nd);
1439 
1440 	/* Add the VLAN id to our internal list */
1441 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1442 		n_vids++;
1443 		if (vlan->vid == vid) {
1444 			netdev_printk(KERN_DEBUG, dev,
1445 				      "vid %u already registered\n", vid);
1446 			return 0;
1447 		}
1448 	}
1449 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1450 		netdev_warn(dev,
1451 			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1452 			    vid, NCSI_MAX_VLAN_VIDS);
1453 		return -ENOSPC;
1454 	}
1455 
1456 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1457 	if (!vlan)
1458 		return -ENOMEM;
1459 
1460 	vlan->proto = proto;
1461 	vlan->vid = vid;
1462 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1463 
1464 	netdev_printk(KERN_DEBUG, dev, "Added new vid %u\n", vid);
1465 
1466 	found = ncsi_kick_channels(ndp) != 0;
1467 
1468 	return found ? ncsi_process_next_channel(ndp) : 0;
1469 }
1470 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1471 
1472 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1473 {
1474 	struct vlan_vid *vlan, *tmp;
1475 	struct ncsi_dev_priv *ndp;
1476 	struct ncsi_dev *nd;
1477 	bool found = false;
1478 
1479 	if (vid == 0)
1480 		return 0;
1481 
1482 	nd = ncsi_find_dev(dev);
1483 	if (!nd) {
1484 		netdev_warn(dev, "ncsi: no net_device?\n");
1485 		return 0;
1486 	}
1487 
1488 	ndp = TO_NCSI_DEV_PRIV(nd);
1489 
1490 	/* Remove the VLAN id from our internal list */
1491 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1492 		if (vlan->vid == vid) {
1493 			netdev_printk(KERN_DEBUG, dev,
1494 				      "vid %u found, removing\n", vid);
1495 			list_del_rcu(&vlan->list);
1496 			found = true;
1497 			kfree(vlan);
1498 		}
1499 
1500 	if (!found) {
1501 		netdev_err(dev, "ncsi: vid %u wasn't registered!\n", vid);
1502 		return -EINVAL;
1503 	}
1504 
1505 	found = ncsi_kick_channels(ndp) != 0;
1506 
1507 	return found ? ncsi_process_next_channel(ndp) : 0;
1508 }
1509 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1510 
1511 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1512 				   void (*handler)(struct ncsi_dev *ndev))
1513 {
1514 	struct ncsi_dev_priv *ndp;
1515 	struct ncsi_dev *nd;
1516 	unsigned long flags;
1517 	int i;
1518 
1519 	/* Check if the device has been registered or not */
1520 	nd = ncsi_find_dev(dev);
1521 	if (nd)
1522 		return nd;
1523 
1524 	/* Create NCSI device */
1525 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1526 	if (!ndp)
1527 		return NULL;
1528 
1529 	nd = &ndp->ndev;
1530 	nd->state = ncsi_dev_state_registered;
1531 	nd->dev = dev;
1532 	nd->handler = handler;
1533 	ndp->pending_req_num = 0;
1534 	INIT_LIST_HEAD(&ndp->channel_queue);
1535 	INIT_LIST_HEAD(&ndp->vlan_vids);
1536 	INIT_WORK(&ndp->work, ncsi_dev_work);
1537 
1538 	/* Initialize private NCSI device */
1539 	spin_lock_init(&ndp->lock);
1540 	INIT_LIST_HEAD(&ndp->packages);
1541 	ndp->request_id = NCSI_REQ_START_IDX;
1542 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1543 		ndp->requests[i].id = i;
1544 		ndp->requests[i].ndp = ndp;
1545 		setup_timer(&ndp->requests[i].timer,
1546 			    ncsi_request_timeout,
1547 			    (unsigned long)&ndp->requests[i]);
1548 	}
1549 
1550 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1551 #if IS_ENABLED(CONFIG_IPV6)
1552 	ndp->inet6_addr_num = 0;
1553 	if (list_empty(&ncsi_dev_list))
1554 		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1555 #endif
1556 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1557 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1558 
1559 	/* Register NCSI packet Rx handler */
1560 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1561 	ndp->ptype.func = ncsi_rcv_rsp;
1562 	ndp->ptype.dev = dev;
1563 	dev_add_pack(&ndp->ptype);
1564 
1565 	return nd;
1566 }
1567 EXPORT_SYMBOL_GPL(ncsi_register_dev);
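
/* Illustrative sketch (not part of this file) of how a network driver
 * typically consumes the exported API; the handler name and the places
 * it is called from are hypothetical:
 *
 *	static void my_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state != ncsi_dev_state_functional)
 *			return;
 *		netdev_info(nd->dev, "NCSI link is %s\n",
 *			    nd->link_up ? "up" : "down");
 *	}
 *
 *	At probe time:		ndev = ncsi_register_dev(netdev, my_ncsi_handler);
 *	In ndo_open:		ncsi_start_dev(ndev);
 *	In ndo_stop:		ncsi_stop_dev(ndev);
 *	At driver removal:	ncsi_unregister_dev(ndev);
 */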
1568 
1569 int ncsi_start_dev(struct ncsi_dev *nd)
1570 {
1571 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1572 	int ret;
1573 
1574 	if (nd->state != ncsi_dev_state_registered &&
1575 	    nd->state != ncsi_dev_state_functional)
1576 		return -ENOTTY;
1577 
1578 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1579 		nd->state = ncsi_dev_state_probe;
1580 		schedule_work(&ndp->work);
1581 		return 0;
1582 	}
1583 
1584 	if (ndp->flags & NCSI_DEV_HWA)
1585 		ret = ncsi_enable_hwa(ndp);
1586 	else
1587 		ret = ncsi_choose_active_channel(ndp);
1588 
1589 	return ret;
1590 }
1591 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1592 
1593 void ncsi_stop_dev(struct ncsi_dev *nd)
1594 {
1595 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1596 	struct ncsi_package *np;
1597 	struct ncsi_channel *nc;
1598 	bool chained;
1599 	int old_state;
1600 	unsigned long flags;
1601 
1602 	/* Stop the channel monitor and reset channel's state */
1603 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1604 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1605 			ncsi_stop_channel_monitor(nc);
1606 
1607 			spin_lock_irqsave(&nc->lock, flags);
1608 			chained = !list_empty(&nc->link);
1609 			old_state = nc->state;
1610 			nc->state = NCSI_CHANNEL_INACTIVE;
1611 			spin_unlock_irqrestore(&nc->lock, flags);
1612 
1613 			WARN_ON_ONCE(chained ||
1614 				     old_state == NCSI_CHANNEL_INVISIBLE);
1615 		}
1616 	}
1617 
1618 	ncsi_report_link(ndp, true);
1619 }
1620 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1621 
1622 void ncsi_unregister_dev(struct ncsi_dev *nd)
1623 {
1624 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1625 	struct ncsi_package *np, *tmp;
1626 	unsigned long flags;
1627 
1628 	dev_remove_pack(&ndp->ptype);
1629 
1630 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1631 		ncsi_remove_package(np);
1632 
1633 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1634 	list_del_rcu(&ndp->node);
1635 #if IS_ENABLED(CONFIG_IPV6)
1636 	if (list_empty(&ncsi_dev_list))
1637 		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1638 #endif
1639 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1640 
1641 	kfree(ndp);
1642 }
1643 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1644