1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3
4 #include <linux/etherdevice.h>
5 #include <linux/inetdevice.h>
6 #include <net/netevent.h>
7 #include <linux/idr.h>
8 #include <net/dst_metadata.h>
9 #include <net/arp.h>
10
11 #include "cmsg.h"
12 #include "main.h"
13 #include "../nfp_net_repr.h"
14 #include "../nfp_net.h"
15
16 #define NFP_FL_MAX_ROUTES 32
17
18 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32
19 #define NFP_TUN_PRE_TUN_RULE_DEL BIT(0)
20 #define NFP_TUN_PRE_TUN_IDX_BIT BIT(3)
21 #define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
22
23 /**
24 * struct nfp_tun_pre_tun_rule - rule matched before decap
25 * @flags: options for the rule offload
26 * @port_idx: index of destination MAC address for the rule
27 * @vlan_tci: VLAN info associated with MAC
28 * @host_ctx_id: stats context of rule to update
29 */
30 struct nfp_tun_pre_tun_rule {
31 __be32 flags;
32 __be16 port_idx;
33 __be16 vlan_tci;
34 __be32 host_ctx_id;
35 };
36
37 /**
38 * struct nfp_tun_active_tuns - periodic message of active tunnels
39 * @seq: sequence number of the message
40 * @count: number of tunnels reported in message
41 * @flags: options part of the request
42 * @tun_info.ipv4: dest IPv4 address of active route
43 * @tun_info.egress_port: port the encapsulated packet egressed
44 * @tun_info.extra: reserved for future use
45 * @tun_info: tunnels that have sent traffic in reported period
46 */
47 struct nfp_tun_active_tuns {
48 __be32 seq;
49 __be32 count;
50 __be32 flags;
51 struct route_ip_info {
52 __be32 ipv4;
53 __be32 egress_port;
54 __be32 extra[2];
55 } tun_info[];
56 };
57
58 /**
59 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
60 * @seq: sequence number of the message
61 * @count: number of tunnels reported in message
62 * @flags: options part of the request
63 * @tun_info.ipv6: dest IPv6 address of active route
64 * @tun_info.egress_port: port the encapsulated packet egressed
65 * @tun_info.extra: reserved for future use
66 * @tun_info: tunnels that have sent traffic in reported period
67 */
68 struct nfp_tun_active_tuns_v6 {
69 __be32 seq;
70 __be32 count;
71 __be32 flags;
72 struct route_ip_info_v6 {
73 struct in6_addr ipv6;
74 __be32 egress_port;
75 __be32 extra[2];
76 } tun_info[];
77 };
78
79 /**
80 * struct nfp_tun_neigh - neighbour/route entry on the NFP
81 * @dst_ipv4: destination IPv4 address
82 * @src_ipv4: source IPv4 address
83 * @dst_addr: destination MAC address
84 * @src_addr: source MAC address
85 * @port_id: NFP port to output packet on - associated with source IPv4
86 */
87 struct nfp_tun_neigh {
88 __be32 dst_ipv4;
89 __be32 src_ipv4;
90 u8 dst_addr[ETH_ALEN];
91 u8 src_addr[ETH_ALEN];
92 __be32 port_id;
93 };
94
95 /**
96 * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
97 * @dst_ipv6: destination IPv6 address
98 * @src_ipv6: source IPv6 address
99 * @dst_addr: destination MAC address
100 * @src_addr: source MAC address
101 * @port_id: NFP port to output packet on - associated with source IPv6
102 */
103 struct nfp_tun_neigh_v6 {
104 struct in6_addr dst_ipv6;
105 struct in6_addr src_ipv6;
106 u8 dst_addr[ETH_ALEN];
107 u8 src_addr[ETH_ALEN];
108 __be32 port_id;
109 };
110
111 /**
112 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
113 * @ingress_port: ingress port of packet that signalled request
114 * @ipv4_addr: destination ipv4 address for route
115 * @reserved: reserved for future use
116 */
117 struct nfp_tun_req_route_ipv4 {
118 __be32 ingress_port;
119 __be32 ipv4_addr;
120 __be32 reserved[2];
121 };
122
123 /**
124 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
125 * @ingress_port: ingress port of packet that signalled request
126 * @ipv6_addr: destination ipv6 address for route
127 */
128 struct nfp_tun_req_route_ipv6 {
129 __be32 ingress_port;
130 struct in6_addr ipv6_addr;
131 };
132
133 /**
134 * struct nfp_offloaded_route - routes that are offloaded to the NFP
135 * @list: list pointer
136 * @ip_add: destination of route - can be IPv4 or IPv6
137 */
138 struct nfp_offloaded_route {
139 struct list_head list;
140 u8 ip_add[];
141 };
142
143 #define NFP_FL_IPV4_ADDRS_MAX 32
144
145 /**
146 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
147 * @count: number of IPs populated in the array
148 * @ipv4_addr: array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
149 */
150 struct nfp_tun_ipv4_addr {
151 __be32 count;
152 __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
153 };
154
155 /**
156 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
157 * @ipv4_addr: IP address
158 * @ref_count: number of rules currently using this IP
159 * @list: list pointer
160 */
161 struct nfp_ipv4_addr_entry {
162 __be32 ipv4_addr;
163 int ref_count;
164 struct list_head list;
165 };
166
167 #define NFP_FL_IPV6_ADDRS_MAX 4
168
169 /**
170 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
171 * @count: number of IPs populated in the array
172 * @ipv6_addr: array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
173 */
174 struct nfp_tun_ipv6_addr {
175 __be32 count;
176 struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
177 };
178
179 #define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
180
181 /**
182 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
183 * @flags: MAC address offload options
184 * @count: number of MAC addresses in the message (should be 1)
185 * @index: index of MAC address in the lookup table
186 * @addr: interface MAC address
187 */
188 struct nfp_tun_mac_addr_offload {
189 __be16 flags;
190 __be16 count;
191 __be16 index;
192 u8 addr[ETH_ALEN];
193 };
194
195 enum nfp_flower_mac_offload_cmd {
196 NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
197 NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
198 NFP_TUNNEL_MAC_OFFLOAD_MOD = 2,
199 };
200
201 #define NFP_MAX_MAC_INDEX 0xff
202
203 /**
204 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
205 * @ht_node: Hashtable entry
206 * @addr: Offloaded MAC address
207 * @index: Offloaded index for given MAC address
208 * @ref_count: Number of devs using this MAC address
209 * @repr_list: List of reprs sharing this MAC address
210 * @bridge_count: Number of bridge/internal devs with MAC
211 */
212 struct nfp_tun_offloaded_mac {
213 struct rhash_head ht_node;
214 u8 addr[ETH_ALEN];
215 u16 index;
216 int ref_count;
217 struct list_head repr_list;
218 int bridge_count;
219 };
220
221 static const struct rhashtable_params offloaded_macs_params = {
222 .key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
223 .head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node),
224 .key_len = ETH_ALEN,
225 .automatic_shrinking = true,
226 };
227
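/* The firmware periodically reports the tunnels that have carried traffic.
 * For each reported destination, refresh the kernel neighbour entry so the
 * offloaded route is not aged out while it is still in use.
 */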
228 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
229 {
230 struct nfp_tun_active_tuns *payload;
231 struct net_device *netdev;
232 int count, i, pay_len;
233 struct neighbour *n;
234 __be32 ipv4_addr;
235 u32 port;
236
237 payload = nfp_flower_cmsg_get_data(skb);
238 count = be32_to_cpu(payload->count);
239 if (count > NFP_FL_MAX_ROUTES) {
240 nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
241 return;
242 }
243
244 pay_len = nfp_flower_cmsg_get_data_len(skb);
245 if (pay_len != struct_size(payload, tun_info, count)) {
246 nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
247 return;
248 }
249
250 rcu_read_lock();
251 for (i = 0; i < count; i++) {
252 ipv4_addr = payload->tun_info[i].ipv4;
253 port = be32_to_cpu(payload->tun_info[i].egress_port);
254 netdev = nfp_app_dev_get(app, port, NULL);
255 if (!netdev)
256 continue;
257
258 n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
259 if (!n)
260 continue;
261
262 /* Update the used timestamp of neighbour */
263 neigh_event_send(n, NULL);
264 neigh_release(n);
265 }
266 rcu_read_unlock();
267 }
268
269 void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
270 {
271 #if IS_ENABLED(CONFIG_IPV6)
272 struct nfp_tun_active_tuns_v6 *payload;
273 struct net_device *netdev;
274 int count, i, pay_len;
275 struct neighbour *n;
276 void *ipv6_add;
277 u32 port;
278
279 payload = nfp_flower_cmsg_get_data(skb);
280 count = be32_to_cpu(payload->count);
281 if (count > NFP_FL_IPV6_ADDRS_MAX) {
282 nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
283 return;
284 }
285
286 pay_len = nfp_flower_cmsg_get_data_len(skb);
287 if (pay_len != struct_size(payload, tun_info, count)) {
288 nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
289 return;
290 }
291
292 rcu_read_lock();
293 for (i = 0; i < count; i++) {
294 ipv6_add = &payload->tun_info[i].ipv6;
295 port = be32_to_cpu(payload->tun_info[i].egress_port);
296 netdev = nfp_app_dev_get(app, port, NULL);
297 if (!netdev)
298 continue;
299
300 n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
301 if (!n)
302 continue;
303
304 /* Update the used timestamp of neighbour */
305 neigh_event_send(n, NULL);
306 neigh_release(n);
307 }
308 rcu_read_unlock();
309 #endif
310 }
311
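/* Allocate a control message of type @mtype, copy @plen bytes of @pdata into
 * its data area and transmit it to the firmware over the control channel.
 */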
312 static int
313 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
314 gfp_t flag)
315 {
316 struct sk_buff *skb;
317 unsigned char *msg;
318
319 skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
320 if (!skb)
321 return -ENOMEM;
322
323 msg = nfp_flower_cmsg_get_data(skb);
324 memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
325
326 nfp_ctrl_tx(app->ctrl, skb);
327 return 0;
328 }
329
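/* The helpers below maintain a spinlock-protected cache of route destinations
 * (raw IPv4 or IPv6 address bytes) that have been written to the firmware.
 * The neighbour event handler only acts on destinations found in this cache.
 */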
330 static bool
331 __nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
332 void *add, int add_len)
333 {
334 struct nfp_offloaded_route *entry;
335
336 spin_lock_bh(list_lock);
337 list_for_each_entry(entry, route_list, list)
338 if (!memcmp(entry->ip_add, add, add_len)) {
339 spin_unlock_bh(list_lock);
340 return true;
341 }
342 spin_unlock_bh(list_lock);
343 return false;
344 }
345
346 static int
347 __nfp_tun_add_route_to_cache(struct list_head *route_list,
348 spinlock_t *list_lock, void *add, int add_len)
349 {
350 struct nfp_offloaded_route *entry;
351
352 spin_lock_bh(list_lock);
353 list_for_each_entry(entry, route_list, list)
354 if (!memcmp(entry->ip_add, add, add_len)) {
355 spin_unlock_bh(list_lock);
356 return 0;
357 }
358
359 entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
360 if (!entry) {
361 spin_unlock_bh(list_lock);
362 return -ENOMEM;
363 }
364
365 memcpy(entry->ip_add, add, add_len);
366 list_add_tail(&entry->list, route_list);
367 spin_unlock_bh(list_lock);
368
369 return 0;
370 }
371
372 static void
373 __nfp_tun_del_route_from_cache(struct list_head *route_list,
374 spinlock_t *list_lock, void *add, int add_len)
375 {
376 struct nfp_offloaded_route *entry;
377
378 spin_lock_bh(list_lock);
379 list_for_each_entry(entry, route_list, list)
380 if (!memcmp(entry->ip_add, add, add_len)) {
381 list_del(&entry->list);
382 kfree(entry);
383 break;
384 }
385 spin_unlock_bh(list_lock);
386 }
387
388 static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
389 {
390 struct nfp_flower_priv *priv = app->priv;
391
392 return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
393 &priv->tun.neigh_off_lock_v4, ipv4_addr,
394 sizeof(*ipv4_addr));
395 }
396
397 static bool
398 nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
399 {
400 struct nfp_flower_priv *priv = app->priv;
401
402 return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
403 &priv->tun.neigh_off_lock_v6, ipv6_addr,
404 sizeof(*ipv6_addr));
405 }
406
407 static void
408 nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
409 {
410 struct nfp_flower_priv *priv = app->priv;
411
412 __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
413 &priv->tun.neigh_off_lock_v4, ipv4_addr,
414 sizeof(*ipv4_addr));
415 }
416
417 static void
418 nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
419 {
420 struct nfp_flower_priv *priv = app->priv;
421
422 __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
423 &priv->tun.neigh_off_lock_v6, ipv6_addr,
424 sizeof(*ipv6_addr));
425 }
426
427 static void
428 nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
429 {
430 struct nfp_flower_priv *priv = app->priv;
431
432 __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
433 &priv->tun.neigh_off_lock_v4, ipv4_addr,
434 sizeof(*ipv4_addr));
435 }
436
437 static void
438 nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
439 {
440 struct nfp_flower_priv *priv = app->priv;
441
442 __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
443 &priv->tun.neigh_off_lock_v6, ipv6_addr,
444 sizeof(*ipv6_addr));
445 }
446
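/* Send an IPv4 neighbour entry to the firmware. If the neighbour is invalid
 * or dead, only the destination IP is sent (all other fields zeroed), the
 * destination is dropped from the local route cache and an ARP probe is
 * triggered to re-verify the neighbour state.
 */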
447 static void
448 nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
449 struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
450 {
451 struct nfp_tun_neigh payload;
452 u32 port_id;
453
454 port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
455 if (!port_id)
456 return;
457
458 memset(&payload, 0, sizeof(struct nfp_tun_neigh));
459 payload.dst_ipv4 = flow->daddr;
460
461 /* If entry has expired send dst IP with all other fields 0. */
462 if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
463 nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
464 /* Trigger ARP to verify invalid neighbour state. */
465 neigh_event_send(neigh, NULL);
466 goto send_msg;
467 }
468
469 /* Have a valid neighbour so populate rest of entry. */
470 payload.src_ipv4 = flow->saddr;
471 ether_addr_copy(payload.src_addr, netdev->dev_addr);
472 neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
473 payload.port_id = cpu_to_be32(port_id);
474 /* Add destination of new route to NFP cache. */
475 nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
476
477 send_msg:
478 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
479 sizeof(struct nfp_tun_neigh),
480 (unsigned char *)&payload, flag);
481 }
482
483 static void
484 nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
485 struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
486 {
487 struct nfp_tun_neigh_v6 payload;
488 u32 port_id;
489
490 port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
491 if (!port_id)
492 return;
493
494 memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
495 payload.dst_ipv6 = flow->daddr;
496
497 /* If entry has expired send dst IP with all other fields 0. */
498 if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
499 nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
500 /* Trigger probe to verify invalid neighbour state. */
501 neigh_event_send(neigh, NULL);
502 goto send_msg;
503 }
504
505 /* Have a valid neighbour so populate rest of entry. */
506 payload.src_ipv6 = flow->saddr;
507 ether_addr_copy(payload.src_addr, netdev->dev_addr);
508 neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
509 payload.port_id = cpu_to_be32(port_id);
510 /* Add destination of new route to NFP cache. */
511 nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
512
513 send_msg:
514 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
515 sizeof(struct nfp_tun_neigh_v6),
516 (unsigned char *)&payload, flag);
517 }
518
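/* netevent notifier callback. On neighbour updates or redirects for
 * destinations already offloaded to the firmware, redo the route lookup and
 * push the refreshed neighbour entry via nfp_tun_write_neigh_v4/v6().
 */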
519 static int
520 nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
521 void *ptr)
522 {
523 struct nfp_flower_priv *app_priv;
524 struct netevent_redirect *redir;
525 struct flowi4 flow4 = {};
526 struct flowi6 flow6 = {};
527 struct neighbour *n;
528 struct nfp_app *app;
529 struct rtable *rt;
530 bool ipv6 = false;
531 int err;
532
533 switch (event) {
534 case NETEVENT_REDIRECT:
535 redir = (struct netevent_redirect *)ptr;
536 n = redir->neigh;
537 break;
538 case NETEVENT_NEIGH_UPDATE:
539 n = (struct neighbour *)ptr;
540 break;
541 default:
542 return NOTIFY_DONE;
543 }
544
545 if (n->tbl->family == AF_INET6)
546 ipv6 = true;
547
548 if (ipv6)
549 flow6.daddr = *(struct in6_addr *)n->primary_key;
550 else
551 flow4.daddr = *(__be32 *)n->primary_key;
552
553 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
554 app = app_priv->app;
555
556 if (!nfp_netdev_is_nfp_repr(n->dev) &&
557 !nfp_flower_internal_port_can_offload(app, n->dev))
558 return NOTIFY_DONE;
559
560 /* Only concerned with changes to routes already added to NFP. */
561 if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
562 (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
563 return NOTIFY_DONE;
564
565 #if IS_ENABLED(CONFIG_INET)
566 if (ipv6) {
567 #if IS_ENABLED(CONFIG_IPV6)
568 struct dst_entry *dst;
569
570 dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
571 &flow6, NULL);
572 if (IS_ERR(dst))
573 return NOTIFY_DONE;
574
575 dst_release(dst);
576 flow6.flowi6_proto = IPPROTO_UDP;
577 nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
578 #else
579 return NOTIFY_DONE;
580 #endif /* CONFIG_IPV6 */
581 } else {
582 /* Do a route lookup to populate flow data. */
583 rt = ip_route_output_key(dev_net(n->dev), &flow4);
584 err = PTR_ERR_OR_ZERO(rt);
585 if (err)
586 return NOTIFY_DONE;
587
588 ip_rt_put(rt);
589
590 flow4.flowi4_proto = IPPROTO_UDP;
591 nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
592 }
593 #else
594 return NOTIFY_DONE;
595 #endif /* CONFIG_INET */
596
597 return NOTIFY_OK;
598 }
599
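/* Handle a route lookup request from the firmware: resolve the route and
 * neighbour for the requested destination in the namespace of the ingress
 * port and reply with nfp_tun_write_neigh_v4(); warn if no route is found.
 */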
600 void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
601 {
602 struct nfp_tun_req_route_ipv4 *payload;
603 struct net_device *netdev;
604 struct flowi4 flow = {};
605 struct neighbour *n;
606 struct rtable *rt;
607 int err;
608
609 payload = nfp_flower_cmsg_get_data(skb);
610
611 rcu_read_lock();
612 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
613 if (!netdev)
614 goto fail_rcu_unlock;
615
616 flow.daddr = payload->ipv4_addr;
617 flow.flowi4_proto = IPPROTO_UDP;
618
619 #if IS_ENABLED(CONFIG_INET)
620 /* Do a route lookup on same namespace as ingress port. */
621 rt = ip_route_output_key(dev_net(netdev), &flow);
622 err = PTR_ERR_OR_ZERO(rt);
623 if (err)
624 goto fail_rcu_unlock;
625 #else
626 goto fail_rcu_unlock;
627 #endif
628
629 /* Get the neighbour entry for the lookup */
630 n = dst_neigh_lookup(&rt->dst, &flow.daddr);
631 ip_rt_put(rt);
632 if (!n)
633 goto fail_rcu_unlock;
634 nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
635 neigh_release(n);
636 rcu_read_unlock();
637 return;
638
639 fail_rcu_unlock:
640 rcu_read_unlock();
641 nfp_flower_cmsg_warn(app, "Requested route not found.\n");
642 }
643
644 void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
645 {
646 struct nfp_tun_req_route_ipv6 *payload;
647 struct net_device *netdev;
648 struct flowi6 flow = {};
649 struct dst_entry *dst;
650 struct neighbour *n;
651
652 payload = nfp_flower_cmsg_get_data(skb);
653
654 rcu_read_lock();
655 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
656 if (!netdev)
657 goto fail_rcu_unlock;
658
659 flow.daddr = payload->ipv6_addr;
660 flow.flowi6_proto = IPPROTO_UDP;
661
662 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
663 dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
664 NULL);
665 if (IS_ERR(dst))
666 goto fail_rcu_unlock;
667 #else
668 goto fail_rcu_unlock;
669 #endif
670
671 n = dst_neigh_lookup(dst, &flow.daddr);
672 dst_release(dst);
673 if (!n)
674 goto fail_rcu_unlock;
675
676 nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
677 neigh_release(n);
678 rcu_read_unlock();
679 return;
680
681 fail_rcu_unlock:
682 rcu_read_unlock();
683 nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
684 }
685
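/* Push the complete list of offloaded tunnel endpoint IPv4 addresses to the
 * firmware in a single control message.
 */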
686 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
687 {
688 struct nfp_flower_priv *priv = app->priv;
689 struct nfp_ipv4_addr_entry *entry;
690 struct nfp_tun_ipv4_addr payload;
691 struct list_head *ptr, *storage;
692 int count;
693
694 memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
695 mutex_lock(&priv->tun.ipv4_off_lock);
696 count = 0;
697 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
698 if (count >= NFP_FL_IPV4_ADDRS_MAX) {
699 mutex_unlock(&priv->tun.ipv4_off_lock);
700 nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
701 return;
702 }
703 entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
704 payload.ipv4_addr[count++] = entry->ipv4_addr;
705 }
706 payload.count = cpu_to_be32(count);
707 mutex_unlock(&priv->tun.ipv4_off_lock);
708
709 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
710 sizeof(struct nfp_tun_ipv4_addr),
711 &payload, GFP_KERNEL);
712 }
713
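/* Reference-counted add of a tunnel endpoint IPv4 address. A new address is
 * appended to the offload list and the updated list is written to firmware.
 */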
714 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
715 {
716 struct nfp_flower_priv *priv = app->priv;
717 struct nfp_ipv4_addr_entry *entry;
718 struct list_head *ptr, *storage;
719
720 mutex_lock(&priv->tun.ipv4_off_lock);
721 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
722 entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
723 if (entry->ipv4_addr == ipv4) {
724 entry->ref_count++;
725 mutex_unlock(&priv->tun.ipv4_off_lock);
726 return;
727 }
728 }
729
730 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
731 if (!entry) {
732 mutex_unlock(&priv->tun.ipv4_off_lock);
733 nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
734 return;
735 }
736 entry->ipv4_addr = ipv4;
737 entry->ref_count = 1;
738 list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
739 mutex_unlock(&priv->tun.ipv4_off_lock);
740
741 nfp_tun_write_ipv4_list(app);
742 }
743
744 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
745 {
746 struct nfp_flower_priv *priv = app->priv;
747 struct nfp_ipv4_addr_entry *entry;
748 struct list_head *ptr, *storage;
749
750 mutex_lock(&priv->tun.ipv4_off_lock);
751 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
752 entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
753 if (entry->ipv4_addr == ipv4) {
754 entry->ref_count--;
755 if (!entry->ref_count) {
756 list_del(&entry->list);
757 kfree(entry);
758 }
759 break;
760 }
761 }
762 mutex_unlock(&priv->tun.ipv4_off_lock);
763
764 nfp_tun_write_ipv4_list(app);
765 }
766
767 static void nfp_tun_write_ipv6_list(struct nfp_app *app)
768 {
769 struct nfp_flower_priv *priv = app->priv;
770 struct nfp_ipv6_addr_entry *entry;
771 struct nfp_tun_ipv6_addr payload;
772 int count = 0;
773
774 memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
775 mutex_lock(&priv->tun.ipv6_off_lock);
776 list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
777 if (count >= NFP_FL_IPV6_ADDRS_MAX) {
778 nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
779 break;
780 }
781 payload.ipv6_addr[count++] = entry->ipv6_addr;
782 }
783 mutex_unlock(&priv->tun.ipv6_off_lock);
784 payload.count = cpu_to_be32(count);
785
786 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
787 sizeof(struct nfp_tun_ipv6_addr),
788 &payload, GFP_KERNEL);
789 }
790
791 struct nfp_ipv6_addr_entry *
792 nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
793 {
794 struct nfp_flower_priv *priv = app->priv;
795 struct nfp_ipv6_addr_entry *entry;
796
797 mutex_lock(&priv->tun.ipv6_off_lock);
798 list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
799 if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
800 entry->ref_count++;
801 mutex_unlock(&priv->tun.ipv6_off_lock);
802 return entry;
803 }
804
805 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
806 if (!entry) {
807 mutex_unlock(&priv->tun.ipv6_off_lock);
808 nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
809 return NULL;
810 }
811 entry->ipv6_addr = *ipv6;
812 entry->ref_count = 1;
813 list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
814 mutex_unlock(&priv->tun.ipv6_off_lock);
815
816 nfp_tun_write_ipv6_list(app);
817
818 return entry;
819 }
820
821 void
822 nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
823 {
824 struct nfp_flower_priv *priv = app->priv;
825 bool freed = false;
826
827 mutex_lock(&priv->tun.ipv6_off_lock);
828 if (!--entry->ref_count) {
829 list_del(&entry->list);
830 kfree(entry);
831 freed = true;
832 }
833 mutex_unlock(&priv->tun.ipv6_off_lock);
834
835 if (freed)
836 nfp_tun_write_ipv6_list(app);
837 }
838
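/* Send a single MAC address add/delete to the firmware MAC lookup table at
 * the given index.
 */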
839 static int
840 __nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
841 {
842 struct nfp_tun_mac_addr_offload payload;
843
844 memset(&payload, 0, sizeof(payload));
845
846 if (del)
847 payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
848
849 /* FW supports multiple MACs per cmsg but restrict to single. */
850 payload.count = cpu_to_be16(1);
851 payload.index = cpu_to_be16(idx);
852 ether_addr_copy(payload.addr, mac);
853
854 return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
855 sizeof(struct nfp_tun_mac_addr_offload),
856 &payload, GFP_KERNEL);
857 }
858
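/* MAC index encoding helpers. The low byte of the index carries the port
 * type: PHYS_PORT indexes are derived from a physical repr's port id, while
 * OTHER_PORT indexes come from the mac_off_ids IDA and mark MACs shared by
 * several devices. The upper byte holds the port id or IDA id respectively.
 */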
859 static bool nfp_tunnel_port_is_phy_repr(int port)
860 {
861 if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
862 NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
863 return true;
864
865 return false;
866 }
867
868 static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
869 {
870 return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
871 }
872
873 static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
874 {
875 return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
876 }
877
878 static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
879 {
880 return nfp_mac_idx >> 8;
881 }
882
883 static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
884 {
885 return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
886 }
887
888 static struct nfp_tun_offloaded_mac *
889 nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
890 {
891 struct nfp_flower_priv *priv = app->priv;
892
893 return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
894 offloaded_macs_params);
895 }
896
897 static void
898 nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
899 struct net_device *netdev, bool mod)
900 {
901 if (nfp_netdev_is_nfp_repr(netdev)) {
902 struct nfp_flower_repr_priv *repr_priv;
903 struct nfp_repr *repr;
904
905 repr = netdev_priv(netdev);
906 repr_priv = repr->app_priv;
907
908 /* If modifying MAC, remove repr from old list first. */
909 if (mod)
910 list_del(&repr_priv->mac_list);
911
912 list_add_tail(&repr_priv->mac_list, &entry->repr_list);
913 } else if (nfp_flower_is_supported_bridge(netdev)) {
914 entry->bridge_count++;
915 }
916
917 entry->ref_count++;
918 }
919
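/* Find or create the offloaded-MAC entry for @netdev's address and sync it
 * to the firmware. Physical reprs get an index based on their port id; MACs
 * that are shared or belong to non-repr devices get a global index allocated
 * from the IDA, with the pre-tunnel index bit set for bridge devices.
 */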
920 static int
921 nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
922 int port, bool mod)
923 {
924 struct nfp_flower_priv *priv = app->priv;
925 struct nfp_tun_offloaded_mac *entry;
926 int ida_idx = -1, err;
927 u16 nfp_mac_idx = 0;
928
929 entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
930 if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
931 if (entry->bridge_count ||
932 !nfp_flower_is_supported_bridge(netdev)) {
933 nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
934 netdev, mod);
935 return 0;
936 }
937
938 /* MAC is global but matches need to go to pre_tun table. */
939 nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
940 }
941
942 if (!nfp_mac_idx) {
943 /* Assign a global index if non-repr or MAC is now shared. */
944 if (entry || !port) {
945 ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
946 NFP_MAX_MAC_INDEX, GFP_KERNEL);
947 if (ida_idx < 0)
948 return ida_idx;
949
950 nfp_mac_idx =
951 nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
952
953 if (nfp_flower_is_supported_bridge(netdev))
954 nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
955
956 } else {
957 nfp_mac_idx =
958 nfp_tunnel_get_mac_idx_from_phy_port_id(port);
959 }
960 }
961
962 if (!entry) {
963 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
964 if (!entry) {
965 err = -ENOMEM;
966 goto err_free_ida;
967 }
968
969 ether_addr_copy(entry->addr, netdev->dev_addr);
970 INIT_LIST_HEAD(&entry->repr_list);
971
972 if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
973 &entry->ht_node,
974 offloaded_macs_params)) {
975 err = -ENOMEM;
976 goto err_free_entry;
977 }
978 }
979
980 err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
981 nfp_mac_idx, false);
982 if (err) {
983 /* If not shared then free. */
984 if (!entry->ref_count)
985 goto err_remove_hash;
986 goto err_free_ida;
987 }
988
989 entry->index = nfp_mac_idx;
990 nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
991
992 return 0;
993
994 err_remove_hash:
995 rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
996 offloaded_macs_params);
997 err_free_entry:
998 kfree(entry);
999 err_free_ida:
1000 if (ida_idx != -1)
1001 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
1002
1003 return err;
1004 }
1005
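/* Drop a reference on an offloaded MAC. If the address falls back to a
 * single repr user, re-point its index at that repr's physical port; once
 * the last user is gone, release any global IDA id, remove the hashtable
 * entry and tell the firmware to delete the MAC.
 */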
1006 static int
1007 nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
1008 u8 *mac, bool mod)
1009 {
1010 struct nfp_flower_priv *priv = app->priv;
1011 struct nfp_flower_repr_priv *repr_priv;
1012 struct nfp_tun_offloaded_mac *entry;
1013 struct nfp_repr *repr;
1014 u16 nfp_mac_idx;
1015 int ida_idx;
1016
1017 entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
1018 if (!entry)
1019 return 0;
1020
1021 entry->ref_count--;
1022 /* If del is part of a mod then mac_list is still in use elsewhere. */
1023 if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
1024 repr = netdev_priv(netdev);
1025 repr_priv = repr->app_priv;
1026 list_del(&repr_priv->mac_list);
1027 }
1028
1029 if (nfp_flower_is_supported_bridge(netdev)) {
1030 entry->bridge_count--;
1031
1032 if (!entry->bridge_count && entry->ref_count) {
1033 nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
1034 if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
1035 false)) {
1036 nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
1037 netdev_name(netdev));
1038 return 0;
1039 }
1040
1041 entry->index = nfp_mac_idx;
1042 return 0;
1043 }
1044 }
1045
1046 /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
1047 if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
1048 int port, err;
1049
1050 repr_priv = list_first_entry(&entry->repr_list,
1051 struct nfp_flower_repr_priv,
1052 mac_list);
1053 repr = repr_priv->nfp_repr;
1054 port = nfp_repr_get_port_id(repr->netdev);
1055 nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
1056 err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
1057 if (err) {
1058 nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
1059 netdev_name(netdev));
1060 return 0;
1061 }
1062
1063 ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
1064 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
1065 entry->index = nfp_mac_idx;
1066 return 0;
1067 }
1068
1069 if (entry->ref_count)
1070 return 0;
1071
1072 WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
1073 &entry->ht_node,
1074 offloaded_macs_params));
1075
1076 if (nfp_flower_is_supported_bridge(netdev))
1077 nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
1078 else
1079 nfp_mac_idx = entry->index;
1080
1081 /* If MAC has global ID then extract and free the ida entry. */
1082 if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
1083 ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
1084 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
1085 }
1086
1087 kfree(entry);
1088
1089 return __nfp_tunnel_offload_mac(app, mac, 0, true);
1090 }
1091
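/* Common entry point for MAC offload add/delete/modify. Applies to physical
 * port reprs and to non-repr netdevs that flower can offload; other devices
 * are ignored.
 */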
1092 static int
1093 nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
1094 enum nfp_flower_mac_offload_cmd cmd)
1095 {
1096 struct nfp_flower_non_repr_priv *nr_priv = NULL;
1097 bool non_repr = false, *mac_offloaded;
1098 u8 *off_mac = NULL;
1099 int err, port = 0;
1100
1101 if (nfp_netdev_is_nfp_repr(netdev)) {
1102 struct nfp_flower_repr_priv *repr_priv;
1103 struct nfp_repr *repr;
1104
1105 repr = netdev_priv(netdev);
1106 if (repr->app != app)
1107 return 0;
1108
1109 repr_priv = repr->app_priv;
1110 if (repr_priv->on_bridge)
1111 return 0;
1112
1113 mac_offloaded = &repr_priv->mac_offloaded;
1114 off_mac = &repr_priv->offloaded_mac_addr[0];
1115 port = nfp_repr_get_port_id(netdev);
1116 if (!nfp_tunnel_port_is_phy_repr(port))
1117 return 0;
1118 } else if (nfp_fl_is_netdev_to_offload(netdev)) {
1119 nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
1120 if (!nr_priv)
1121 return -ENOMEM;
1122
1123 mac_offloaded = &nr_priv->mac_offloaded;
1124 off_mac = &nr_priv->offloaded_mac_addr[0];
1125 non_repr = true;
1126 } else {
1127 return 0;
1128 }
1129
1130 if (!is_valid_ether_addr(netdev->dev_addr)) {
1131 err = -EINVAL;
1132 goto err_put_non_repr_priv;
1133 }
1134
1135 if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
1136 cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
1137
1138 switch (cmd) {
1139 case NFP_TUNNEL_MAC_OFFLOAD_ADD:
1140 err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
1141 if (err)
1142 goto err_put_non_repr_priv;
1143
1144 if (non_repr)
1145 __nfp_flower_non_repr_priv_get(nr_priv);
1146
1147 *mac_offloaded = true;
1148 ether_addr_copy(off_mac, netdev->dev_addr);
1149 break;
1150 case NFP_TUNNEL_MAC_OFFLOAD_DEL:
1151 /* Only attempt delete if add was successful. */
1152 if (!*mac_offloaded)
1153 break;
1154
1155 if (non_repr)
1156 __nfp_flower_non_repr_priv_put(nr_priv);
1157
1158 *mac_offloaded = false;
1159
1160 err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
1161 false);
1162 if (err)
1163 goto err_put_non_repr_priv;
1164
1165 break;
1166 case NFP_TUNNEL_MAC_OFFLOAD_MOD:
1167 /* Ignore if changing to the same address. */
1168 if (ether_addr_equal(netdev->dev_addr, off_mac))
1169 break;
1170
1171 err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
1172 if (err)
1173 goto err_put_non_repr_priv;
1174
1175 /* Delete the previous MAC address. */
1176 err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
1177 if (err)
1178 nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
1179 netdev_name(netdev));
1180
1181 ether_addr_copy(off_mac, netdev->dev_addr);
1182 break;
1183 default:
1184 err = -EINVAL;
1185 goto err_put_non_repr_priv;
1186 }
1187
1188 if (non_repr)
1189 __nfp_flower_non_repr_priv_put(nr_priv);
1190
1191 return 0;
1192
1193 err_put_non_repr_priv:
1194 if (non_repr)
1195 __nfp_flower_non_repr_priv_put(nr_priv);
1196
1197 return err;
1198 }
1199
1200 int nfp_tunnel_mac_event_handler(struct nfp_app *app,
1201 struct net_device *netdev,
1202 unsigned long event, void *ptr)
1203 {
1204 int err;
1205
1206 if (event == NETDEV_DOWN) {
1207 err = nfp_tunnel_offload_mac(app, netdev,
1208 NFP_TUNNEL_MAC_OFFLOAD_DEL);
1209 if (err)
1210 nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
1211 netdev_name(netdev));
1212 } else if (event == NETDEV_UP) {
1213 err = nfp_tunnel_offload_mac(app, netdev,
1214 NFP_TUNNEL_MAC_OFFLOAD_ADD);
1215 if (err)
1216 nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
1217 netdev_name(netdev));
1218 } else if (event == NETDEV_CHANGEADDR) {
1219 /* Only offload addr change if netdev is already up. */
1220 if (!(netdev->flags & IFF_UP))
1221 return NOTIFY_OK;
1222
1223 err = nfp_tunnel_offload_mac(app, netdev,
1224 NFP_TUNNEL_MAC_OFFLOAD_MOD);
1225 if (err)
1226 nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
1227 netdev_name(netdev));
1228 } else if (event == NETDEV_CHANGEUPPER) {
1229 /* If a repr is attached to a bridge then tunnel packets
1230 * entering the physical port are directed through the bridge
1231 * datapath and cannot be directly detunneled. Therefore,
1232 * associated offloaded MACs and indexes should not be used
1233 * by fw for detunneling.
1234 */
1235 struct netdev_notifier_changeupper_info *info = ptr;
1236 struct net_device *upper = info->upper_dev;
1237 struct nfp_flower_repr_priv *repr_priv;
1238 struct nfp_repr *repr;
1239
1240 if (!nfp_netdev_is_nfp_repr(netdev) ||
1241 !nfp_flower_is_supported_bridge(upper))
1242 return NOTIFY_OK;
1243
1244 repr = netdev_priv(netdev);
1245 if (repr->app != app)
1246 return NOTIFY_OK;
1247
1248 repr_priv = repr->app_priv;
1249
1250 if (info->linking) {
1251 if (nfp_tunnel_offload_mac(app, netdev,
1252 NFP_TUNNEL_MAC_OFFLOAD_DEL))
1253 nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
1254 netdev_name(netdev));
1255 repr_priv->on_bridge = true;
1256 } else {
1257 repr_priv->on_bridge = false;
1258
1259 if (!(netdev->flags & IFF_UP))
1260 return NOTIFY_OK;
1261
1262 if (nfp_tunnel_offload_mac(app, netdev,
1263 NFP_TUNNEL_MAC_OFFLOAD_ADD))
1264 nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
1265 netdev_name(netdev));
1266 }
1267 }
1268 return NOTIFY_OK;
1269 }
1270
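/* Offload a pre-tunnel rule: matched before decap against the internal
 * port's offloaded MAC index and VLAN. The number of such rules is capped
 * at NFP_TUN_PRE_TUN_RULE_LIMIT.
 */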
1271 int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
1272 struct nfp_fl_payload *flow)
1273 {
1274 struct nfp_flower_priv *app_priv = app->priv;
1275 struct nfp_tun_offloaded_mac *mac_entry;
1276 struct nfp_flower_meta_tci *key_meta;
1277 struct nfp_tun_pre_tun_rule payload;
1278 struct net_device *internal_dev;
1279 int err;
1280
1281 if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
1282 return -ENOSPC;
1283
1284 memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
1285
1286 internal_dev = flow->pre_tun_rule.dev;
1287 payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
1288 payload.host_ctx_id = flow->meta.host_ctx_id;
1289
1290 /* Lookup MAC index for the pre-tunnel rule egress device.
1291 * Note that because the device is always an internal port, it will
1292 * have a constant global index so does not need to be tracked.
1293 */
1294 mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
1295 internal_dev->dev_addr);
1296 if (!mac_entry)
1297 return -ENOENT;
1298
1299 /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
1300 * set/clear for port_idx.
1301 */
1302 key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
1303 if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
1304 mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
1305 else
1306 mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
1307
1308 payload.port_idx = cpu_to_be16(mac_entry->index);
1309
1310 /* Copy mac id and vlan to flow - dev may not exist at delete time. */
1311 flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
1312 flow->pre_tun_rule.port_idx = payload.port_idx;
1313
1314 err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
1315 sizeof(struct nfp_tun_pre_tun_rule),
1316 (unsigned char *)&payload, GFP_KERNEL);
1317 if (err)
1318 return err;
1319
1320 app_priv->pre_tun_rule_cnt++;
1321
1322 return 0;
1323 }
1324
1325 int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
1326 struct nfp_fl_payload *flow)
1327 {
1328 struct nfp_flower_priv *app_priv = app->priv;
1329 struct nfp_tun_pre_tun_rule payload;
1330 u32 tmp_flags = 0;
1331 int err;
1332
1333 memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
1334
1335 tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
1336 payload.flags = cpu_to_be32(tmp_flags);
1337 payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
1338 payload.port_idx = flow->pre_tun_rule.port_idx;
1339
1340 err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
1341 sizeof(struct nfp_tun_pre_tun_rule),
1342 (unsigned char *)&payload, GFP_KERNEL);
1343 if (err)
1344 return err;
1345
1346 app_priv->pre_tun_rule_cnt--;
1347
1348 return 0;
1349 }
1350
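/* Initialise all tunnel offload state: the offloaded-MAC hashtable and IDA,
 * the IPv4/IPv6 endpoint address lists and the neighbour route caches, and
 * register the netevent notifier for neighbour updates.
 */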
1351 int nfp_tunnel_config_start(struct nfp_app *app)
1352 {
1353 struct nfp_flower_priv *priv = app->priv;
1354 int err;
1355
1356 /* Initialise rhash for MAC offload tracking. */
1357 err = rhashtable_init(&priv->tun.offloaded_macs,
1358 &offloaded_macs_params);
1359 if (err)
1360 return err;
1361
1362 ida_init(&priv->tun.mac_off_ids);
1363
1364 /* Initialise priv data for IPv4/v6 offloading. */
1365 mutex_init(&priv->tun.ipv4_off_lock);
1366 INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
1367 mutex_init(&priv->tun.ipv6_off_lock);
1368 INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
1369
1370 /* Initialise priv data for neighbour offloading. */
1371 spin_lock_init(&priv->tun.neigh_off_lock_v4);
1372 INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
1373 spin_lock_init(&priv->tun.neigh_off_lock_v6);
1374 INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
1375 priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
1376
1377 err = register_netevent_notifier(&priv->tun.neigh_nb);
1378 if (err) {
1379 rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
1380 nfp_check_rhashtable_empty, NULL);
1381 return err;
1382 }
1383
1384 return 0;
1385 }
1386
1387 void nfp_tunnel_config_stop(struct nfp_app *app)
1388 {
1389 struct nfp_offloaded_route *route_entry, *temp;
1390 struct nfp_flower_priv *priv = app->priv;
1391 struct nfp_ipv4_addr_entry *ip_entry;
1392 struct nfp_tun_neigh_v6 ipv6_route;
1393 struct nfp_tun_neigh ipv4_route;
1394 struct list_head *ptr, *storage;
1395
1396 unregister_netevent_notifier(&priv->tun.neigh_nb);
1397
1398 ida_destroy(&priv->tun.mac_off_ids);
1399
1400 /* Free any memory that may be occupied by ipv4 list. */
1401 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
1402 ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
1403 list_del(&ip_entry->list);
1404 kfree(ip_entry);
1405 }
1406
1407 mutex_destroy(&priv->tun.ipv6_off_lock);
1408
1409 /* Free memory in the route list and remove entries from fw cache. */
1410 list_for_each_entry_safe(route_entry, temp,
1411 &priv->tun.neigh_off_list_v4, list) {
1412 memset(&ipv4_route, 0, sizeof(ipv4_route));
1413 memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
1414 sizeof(ipv4_route.dst_ipv4));
1415 list_del(&route_entry->list);
1416 kfree(route_entry);
1417
1418 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
1419 sizeof(struct nfp_tun_neigh),
1420 (unsigned char *)&ipv4_route,
1421 GFP_KERNEL);
1422 }
1423
1424 list_for_each_entry_safe(route_entry, temp,
1425 &priv->tun.neigh_off_list_v6, list) {
1426 memset(&ipv6_route, 0, sizeof(ipv6_route));
1427 memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
1428 sizeof(ipv6_route.dst_ipv6));
1429 list_del(&route_entry->list);
1430 kfree(route_entry);
1431
1432 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
1433 sizeof(struct nfp_tun_neigh_v6),
1434 (unsigned char *)&ipv6_route,
1435 GFP_KERNEL);
1436 }
1437
1438 /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
1439 rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
1440 nfp_check_rhashtable_empty, NULL);
1441 }
1442