// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

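/* Translate a flower match, as dissected through the flow_rule API, into
 * the dpsw_acl_key (match + mask) layout consumed by the MC firmware.
 * Only the dissector keys listed below can be offloaded. Note that the IP
 * TOS byte carries DSCP in its upper six bits and ECN in the lower two,
 * which is why ECN matches are rejected and the tos fields are programmed
 * shifted right by two.
 */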
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

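/* Add one ACL entry to the switch. The key/mask pair is serialized by
 * dpsw_acl_prepare_entry_cfg() into a command buffer which is passed to
 * the MC firmware by IOVA, so the buffer has to stay DMA mapped for the
 * duration of the dpsw_acl_add_entry() command.
 */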
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
			 DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

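/* Counterpart of dpaa2_switch_acl_entry_add(): serialize the same key and
 * ask the firmware to remove the matching entry from the ACL table.
 */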
static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

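/* Insert the new filter into the block's list of ACL entries, keeping the
 * list sorted by ascending rule priority, and return the index at which
 * the entry was inserted.
 */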
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

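/* The firmware interface does not appear to offer an in-place precedence
 * update, so an entry's precedence is changed by removing it from hardware
 * and adding it back with the new value.
 */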
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

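/* Install a new ACL entry at the right position in the precedence space.
 * Entries are kept packed against the high end of the table, ending at
 * DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - 1, and the entries that should match
 * before the new one are shifted towards lower precedence values, which
 * presumably the hardware evaluates first.
 */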
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

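/* Remove an ACL entry from hardware and from the block's list, then shift
 * the entries that preceded it into the freed slot so that the precedence
 * space stays packed.
 */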
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove the ACL entry from hardware */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Also remove it from the list */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

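/* Map a tc action onto its DPSW ACL result: trap becomes a redirect to the
 * control interface, redirect is only accepted towards another DPAA2
 * switch port, and drop maps directly.
 */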
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

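/* Configure 'to' as the one mirror (reflection) port of the switch, unless
 * one is already set, then apply the mirroring rule on every switch port
 * that shares this filter block. On failure, the ports configured so far
 * are unwound and the mirror port is released if it was claimed here.
 */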
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Set up the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Set up the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per-VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

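/* Tear down a mirroring rule on all the ports of the filter block. The
 * mirror port is marked as unset (through the ethsw->sw_attr.num_ifs
 * sentinel) once the last mirror entry of the block is removed.
 */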
static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

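/* Offload a flower filter whose action maps onto the ACL table. For
 * illustration, a rule along the lines of the following (interface names
 * hypothetical) ends up here:
 *
 *   tc filter add dev swp0 ingress protocol ip flower skip_sw \
 *      ip_proto tcp dst_port 80 action drop
 */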
static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

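/* Mirroring can only select traffic per VLAN: the flower key must be an
 * exact (0xFFF mask) match on the VLAN ID and nothing else; PCP/DEI and
 * partially masked VLAN IDs are rejected.
 */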
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

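/* Offload a flower mirroring rule, i.e. per-VLAN ingress mirroring towards
 * a single destination switch port. An illustrative command (interface
 * names hypothetical) that would reach this path:
 *
 *   tc filter add dev swp0 ingress protocol 802.1q flower skip_sw \
 *      vlan_id 100 action mirred egress mirror dev swp1
 */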
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

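/* FLOW_CLS_REPLACE entry point for flower filters: exactly one action per
 * rule is supported, dispatched either to the ACL table (redirect, trap,
 * drop) or to the mirroring logic (mirred).
 */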
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}

static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

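/* Offload a matchall mirroring rule, i.e. mirror all traffic received on
 * the block's ports towards a single destination switch port. Illustrative
 * command (interface names hypothetical):
 *
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *      action mirred egress mirror dev swp1
 */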
static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

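/* Replay all the mirror entries of a filter block on a port, e.g. when the
 * port joins the block, unwinding everything on failure.
 */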
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

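/* Inverse of dpaa2_switch_block_offload_mirror(): remove the block's
 * mirror entries from a port, e.g. when the port leaves the block,
 * re-adding them on failure.
 */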
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}