// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/netdevice.h>
#include <linux/dynamic_debug.h>
#include <linux/etherdevice.h>
#include <linux/list.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_rx_filter.h"

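/* Unlink a filter from both the by_id and by_hash lists and free it.
 * Caller must hold lif->rx_filters.lock.
 */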
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
{
	struct device *dev = lif->ionic->dev;

	hlist_del(&f->by_id);
	hlist_del(&f->by_hash);
	devm_kfree(dev, f);
}

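/* Re-post all saved filters to the device, e.g. after a FW restart.
 * The device assigns a new filter_id to each successful replay, so the
 * by_id hash lists are rebuilt with the new ids; filters that fail to
 * replay are logged and dropped from the list.
 */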
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
	struct ionic_rx_filter_add_cmd *ac;
	struct hlist_head new_id_list;
	struct ionic_admin_ctx ctx;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int key;
	unsigned int i;
	int err;

	INIT_HLIST_HEAD(&new_id_list);
	ac = &ctx.cmd.rx_filter_add;

	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
			memcpy(ac, &f->cmd, sizeof(f->cmd));
			dev_dbg(&lif->netdev->dev, "replay filter command:\n");
			dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
					 &ctx.cmd, sizeof(ctx.cmd), true);

			err = ionic_adminq_post_wait(lif, &ctx);
			if (err) {
				switch (le16_to_cpu(ac->match)) {
				case IONIC_RX_FILTER_MATCH_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan));
					break;
				case IONIC_RX_FILTER_MATCH_MAC:
					netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
						    err, ac->mac.addr);
					break;
				case IONIC_RX_FILTER_MATCH_MAC_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan),
						    ac->mac.addr);
					break;
				}
				spin_lock_bh(&lif->rx_filters.lock);
				ionic_rx_filter_free(lif, f);
				spin_unlock_bh(&lif->rx_filters.lock);

				continue;
			}

			/* remove from old id list, save new id in tmp list */
			spin_lock_bh(&lif->rx_filters.lock);
			hlist_del(&f->by_id);
			spin_unlock_bh(&lif->rx_filters.lock);
			f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
			hlist_add_head(&f->by_id, &new_id_list);
		}
	}

	/* rebuild the by_id hash lists with the new filter ids */
	spin_lock_bh(&lif->rx_filters.lock);
	hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
		key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
		head = &lif->rx_filters.by_id[key];
		hlist_add_head(&f->by_id, head);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}

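/* Initialize the filter lock and the empty by_hash and by_id lists. */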
int ionic_rx_filters_init(struct ionic_lif *lif)
{
	unsigned int i;

	spin_lock_init(&lif->rx_filters.lock);

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
		INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
	}
	spin_unlock_bh(&lif->rx_filters.lock);

	return 0;
}

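/* Free any remaining filters, typically at lif teardown. */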
void ionic_rx_filters_deinit(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id)
			ionic_rx_filter_free(lif, f);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}

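/* Save a filter in the local list, keyed both by a hash of its match
 * value (for lookup) and by its device-assigned filter_id (for replay
 * and delete).  An existing entry for the same match is refreshed in
 * place.  Caller must hold lif->rx_filters.lock.
 */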
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_rx_filter_add_cmd *ac;
	struct ionic_rx_filter *f = NULL;
	struct hlist_head *head;
	unsigned int key;

	ac = &ctx->cmd.rx_filter_add;

	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		key = le16_to_cpu(ac->vlan.vlan);
		f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		key = *(u32 *)ac->mac.addr;
		f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
		break;
	case IONIC_RX_FILTER_MATCH_MAC_VLAN:
		key = le16_to_cpu(ac->mac_vlan.vlan);
		break;
	case IONIC_RX_FILTER_STEER_PKTCLASS:
		key = 0;
		break;
	default:
		return -EINVAL;
	}

	if (f) {
		/* remove from current linking so we can refresh it */
		hlist_del(&f->by_id);
		hlist_del(&f->by_hash);
	} else {
		f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;
	}

	f->flow_id = flow_id;
	f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
	f->state = state;
	f->rxq_index = rxq_index;
	memcpy(&f->cmd, ac, sizeof(f->cmd));
	netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

	INIT_HLIST_NODE(&f->by_hash);
	INIT_HLIST_NODE(&f->by_id);

	key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];
	hlist_add_head(&f->by_hash, head);

	key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
	head = &lif->rx_filters.by_id[key];
	hlist_add_head(&f->by_id, head);

	return 0;
}

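/* Find a VLAN filter by VLAN id.  Caller must hold lif->rx_filters.lock. */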
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
			continue;
		if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
			return f;
	}

	return NULL;
}

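/* Find a MAC filter by address.  Caller must hold lif->rx_filters.lock. */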
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
						const u8 *addr)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
			continue;
		if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
			return f;
	}

	return NULL;
}

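/* Find the packet-class steering filter, if one exists. */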
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
			continue;
		return f;
	}

	return NULL;
}

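/* Dispatch a lookup based on the command's match type. */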
static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
						    struct ionic_rx_filter_add_cmd *ac)
{
	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
	case IONIC_RX_FILTER_MATCH_MAC:
		return ionic_rx_filter_by_addr(lif, ac->mac.addr);
	default:
		netdev_err(lif->netdev, "unsupported filter match %d\n",
			   le16_to_cpu(ac->match));
		return NULL;
	}
}

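/* Track an address add or delete without touching the device: adds are
 * saved in state NEW, deletes either free a not-yet-pushed filter or
 * mark a pushed one as OLD, and FILTER_SYNC_NEEDED is set so that the
 * device gets updated on the next filter sync.
 */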
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (mode == ADD_ADDR && !f) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_filter_add = {
				.opcode = IONIC_CMD_RX_FILTER_ADD,
				.lif_index = cpu_to_le16(lif->index),
				.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
			},
		};

		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_NEW);
		if (err) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return err;
		}

	} else if (mode == ADD_ADDR && f) {
		if (f->state == IONIC_FILTER_STATE_OLD)
			f->state = IONIC_FILTER_STATE_SYNCED;

	} else if (mode == DEL_ADDR && f) {
		if (f->state == IONIC_FILTER_STATE_NEW)
			ionic_rx_filter_free(lif, f);
		else if (f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_OLD;
	} else if (mode == DEL_ADDR && !f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	return 0;
}

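/* Push a filter add to the device and track it in the local list,
 * accounting for filter count limits, retriable errors, and a delete
 * request that may have arrived while the add was in flight.
 */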
static int ionic_lif_filter_add(struct ionic_lif *lif,
				struct ionic_rx_filter_add_cmd *ac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
	};
	struct ionic_rx_filter *f;
	int nfilters;
	int err = 0;

	ctx.cmd.rx_filter_add = *ac;
	ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
	ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 * Since the FW doesn't have a way to tell us the vlan limit,
	 * we start max_vlans at 0 until we hit the ENOSPC error.
	 */
	switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
			   __func__, ctx.cmd.rx_filter_add.vlan.vlan);
		if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
			err = -ENOSPC;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
			   __func__, ctx.cmd.rx_filter_add.mac.addr);
		nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
		if ((lif->nucast + lif->nmcast) >= nfilters)
			err = -ENOSPC;
		break;
	}

	if (err != -ENOSPC)
		err = ionic_adminq_post_wait_nomsg(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);

	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
			f->state = IONIC_FILTER_STATE_NEW;

			/* If -ENOSPC we won't waste time trying to sync again
			 * until there is a delete that might make room
			 */
			if (err != -ENOSPC)
				set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
		}

		spin_unlock_bh(&lif->rx_filters.lock);

		/* store the max_vlans limit that we found */
		if (err == -ENOSPC &&
		    le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
			lif->max_vlans = lif->nvlans;

		/* Prevent unnecessary error messages on recoverable
		 * errors as the filter will get retried on the next
		 * sync attempt.
		 */
		switch (err) {
		case -ENOSPC:
		case -ENXIO:
		case -ETIMEDOUT:
		case -EAGAIN:
		case -EBUSY:
			return 0;
		default:
			break;
		}

		ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
					      ctx.comp.comp.status, err);
		switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
		case IONIC_RX_FILTER_MATCH_VLAN:
			netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
				    ctx.cmd.rx_filter_add.vlan.vlan);
			break;
		case IONIC_RX_FILTER_MATCH_MAC:
			netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
				    ctx.cmd.rx_filter_add.mac.addr);
			break;
		}

		return err;
	}

	switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		lif->nvlans++;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
			lif->nmcast++;
		else
			lif->nucast++;
		break;
	}

	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

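/* Add a MAC filter for the given address.
 *
 * A minimal usage sketch; the calling context here is illustrative and
 * not part of this file:
 *
 *	if (ionic_lif_addr_add(lif, netdev->dev_addr))
 *		netdev_warn(netdev, "failed to add address filter\n");
 */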
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
	};

	memcpy(&ac.mac.addr, addr, ETH_ALEN);

	return ionic_lif_filter_add(lif, &ac);
}

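/* Add a VLAN filter for the given VLAN id. */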
int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
		.vlan.vlan = cpu_to_le16(vid),
	};

	return ionic_lif_filter_add(lif, &ac);
}

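/* Remove a filter from the local list and, if it had already been
 * pushed to the device, ask the device to delete it.  Retriable device
 * errors are ignored since the filter is already gone locally.
 */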
static int ionic_lif_filter_del(struct ionic_lif *lif,
				struct ionic_rx_filter_add_cmd *ac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, ac);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
			   __func__, ac->vlan.vlan, f->filter_id);
		lif->nvlans--;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
			   __func__, ac->mac.addr, f->filter_id);
		if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
			lif->nucast--;
		break;
	}

	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait_nomsg(lif, &ctx);

		switch (err) {
			/* ignore these errors */
		case -EEXIST:
		case -ENXIO:
		case -ETIMEDOUT:
		case -EAGAIN:
		case -EBUSY:
		case 0:
			break;
		default:
			ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
						      ctx.comp.comp.status, err);
			return err;
		}
	}

	return 0;
}

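/* Delete the MAC filter for the given address. */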
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
	};

	memcpy(&ac.mac.addr, addr, ETH_ALEN);

	return ionic_lif_filter_del(lif, &ac);
}

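/* Delete the VLAN filter for the given VLAN id. */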
int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
		.vlan.vlan = cpu_to_le16(vid),
	};

	return ionic_lif_filter_del(lif, &ac);
}

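/* Scratch list entry used to carry a copy of a filter out from under
 * the rx_filters lock during a sync pass.
 */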
struct sync_item {
	struct list_head list;
	struct ionic_rx_filter f;
};

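/* Push any deferred filter adds and deletes to the device.  NEW and OLD
 * entries are copied to local lists under the lock and then processed
 * without it; deletes run first in case they make room for the adds.
 * Anything that fails is left in its current state to be retried on the
 * next sync pass.
 */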
void ionic_rx_filter_sync(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct list_head sync_add_list;
	struct list_head sync_del_list;
	struct sync_item *sync_item;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	struct sync_item *spos;
	unsigned int i;

	INIT_LIST_HEAD(&sync_add_list);
	INIT_LIST_HEAD(&sync_del_list);

	clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	/* Copy the filters to be added and deleted
	 * into a separate local list that needs no locking.
	 */
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			if (f->state == IONIC_FILTER_STATE_NEW ||
			    f->state == IONIC_FILTER_STATE_OLD) {
				sync_item = devm_kzalloc(dev, sizeof(*sync_item),
							 GFP_ATOMIC);
				if (!sync_item)
					goto loop_out;

				sync_item->f = *f;

				if (f->state == IONIC_FILTER_STATE_NEW)
					list_add(&sync_item->list, &sync_add_list);
				else
					list_add(&sync_item->list, &sync_del_list);
			}
		}
	}
loop_out:
	spin_unlock_bh(&lif->rx_filters.lock);

	/* If the add or delete fails, it won't get marked as sync'd
	 * and will be tried again in the next sync action.
	 * Do the deletes first in case we're in an overflow state and
	 * they can clear room for some new filters
	 */
	list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
		(void)ionic_lif_filter_del(lif, &sync_item->f.cmd);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}

	list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
		(void)ionic_lif_filter_add(lif, &sync_item->f.cmd);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}
}