// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020, NXP Semiconductors
 */
#include "sja1105.h"
#include "sja1105_vl.h"

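/* Look up a previously installed rule by its tc cookie. Returns NULL if no
 * rule with that cookie exists in this switch's flow block.
 */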
struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
				       unsigned long cookie)
{
	struct sja1105_rule *rule;

	list_for_each_entry(rule, &priv->flow_block.rules, list)
		if (rule->cookie == cookie)
			return rule;

	return NULL;
}

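/* Return the index of the first L2 policer not yet claimed by any rule, or -1
 * if all policers are in use.
 */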
static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!priv->flow_block.l2_policer_used[i])
			return i;

	return -1;
}

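/* Install (or extend to another port) a policer that rate-limits broadcast
 * traffic. One shared policer entry is allocated per rule, and the per-port
 * broadcast policers of all member ports are pointed at it via sharindx.
 */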
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

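	/* Convert from bytes per second to the policer's rate units:
	 * rate_bytes_per_sec * 512 / 1000000 == rate_bits_per_sec / 15625,
	 * i.e. the RATE field counts in 15.625 kbps increments.
	 */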
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

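/* Install (or extend to another port) a policer that rate-limits traffic of a
 * single VLAN PCP (traffic class), by pointing the per-port, per-TC policers
 * of all member ports at one shared policer entry.
 */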
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

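/* Dispatch a tc-police action to the broadcast or traffic-class policer
 * implementation, depending on the parsed key.
 */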
static int sja1105_flower_policer(struct sja1105_private *priv, int port,
				  struct netlink_ext_ack *extack,
				  unsigned long cookie,
				  struct sja1105_key *key,
				  u64 rate_bytes_per_sec,
				  u32 burst)
{
	switch (key->type) {
	case SJA1105_KEY_BCAST:
		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
						   rate_bytes_per_sec, burst);
	case SJA1105_KEY_TC:
		return sja1105_setup_tc_policer(priv, extack, cookie, port,
						key->tc.pcp, rate_bytes_per_sec,
						burst);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
		return -EOPNOTSUPP;
	}
}

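/* Translate the flower match into one of the keys the driver can act upon:
 * broadcast DMAC, VLAN PCP (traffic class), or a VLAN-aware/VLAN-unaware
 * virtual link identified by DMAC and optionally VID and PCP.
 */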
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		key->tc.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}

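/* Offload a flower classifier: parse the key once, then apply each action
 * (police, trap, redirect, drop, gate) against it. Virtual-link rules are
 * committed with a single static config reload at the end.
 */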
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	rc = -EOPNOTSUPP;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->gate.index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}

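/* Remove a port from an offloaded rule. Virtual-link rules are handed off to
 * sja1105_vl_delete(); policer rules restore the port's default sharindx and
 * free the shared policer once no ports reference it any longer.
 */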
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

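/* Report statistics for an offloaded rule. Only virtual-link rules have
 * hardware counters to report; policer rules report nothing.
 */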
int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
			     struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	int rc;

	if (!rule)
		return 0;

	if (rule->type != SJA1105_RULE_VL)
		return 0;

	rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
			      cls->common.extack);
	if (rc)
		return rc;

	return 0;
}

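/* Called at switch setup time. Reserve the policers whose index equals a port
 * number: these hold each port's default policing settings (the reset state
 * is sharindx == port), so they must never be handed out to flower rules.
 */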
void sja1105_flower_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	int port;

	INIT_LIST_HEAD(&priv->flow_block.rules);

	for (port = 0; port < SJA1105_NUM_PORTS; port++)
		priv->flow_block.l2_policer_used[port] = true;
}

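/* Free all remaining rules when the switch is torn down. */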
void sja1105_flower_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &priv->flow_block.rules) {
		rule = list_entry(pos, struct sja1105_rule, list);
		list_del(&rule->list);
		kfree(rule);
	}
}