// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

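/* A router interface (RIF) is the hardware object that represents a
 * netdev in the L3 domain: it binds the netdev's MAC address, MTU and
 * FID to a virtual router. Nexthops and neighbour entries that egress
 * through the interface are tracked on the two lists below.
 */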
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Reserved for Spectrum-2. */
	u16 ul_rif_id;	/* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

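/* Per-RIF-type operations: setup() fills in type-specific fields from
 * the RIF parameters, configure() and deconfigure() program the RIF to
 * and from the device, and fid_get() resolves the FID that traffic
 * routed through the RIF uses.
 */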
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

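/* Binding a counter to a RIF is a read-modify-write of the RITR
 * register: query the current interface configuration, set the counter
 * index and direction, and write the record back.
 */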
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

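/* A sketch of the typical counter lifetime, e.g. around a dpipe ERIF
 * table dump:
 *
 *	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 *	...
 *	mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *				       MLXSW_SP_RIF_COUNTER_EGRESS, &cnt);
 *	...
 *	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 */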
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

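/* Prefix usage is a bitmap with one bit per possible prefix length.
 * IPv6 allows prefix lengths 0..128, hence 129 bits; IPv4 only uses the
 * low 33 of them.
 */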
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

static struct mlxsw_sp_fib_entry_priv *
mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	if (!ll_ops->fib_entry_priv_size)
		/* No need to have priv */
		return NULL;

	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	return priv;
}

static void
mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
{
	kfree(priv);
}

static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
{
	refcount_inc(&priv->refcnt);
}

static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv || !refcount_dec_and_test(&priv->refcnt))
		return;
	mlxsw_sp_fib_entry_priv_destroy(priv);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv)
		return;
	mlxsw_sp_fib_entry_priv_hold(priv);
	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_priv *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
		mlxsw_sp_fib_entry_priv_put(priv);
	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
}

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
	struct mlxsw_sp_fib_entry_priv *priv;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
	const struct mlxsw_sp_router_ll_ops *ll_ops;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
					 enum mlxsw_sp_l3proto proto)
{
	return 0;
}

static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
}

static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
}

static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	err = ll_ops->init(mlxsw_sp, vr->id, proto);
	if (err)
		return ERR_PTR(err);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	fib->ll_ops = ll_ops;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, true,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, false,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

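/* The LPM tree is programmed as, in effect, a chain of bins, one per
 * used prefix length: the longest used prefix becomes the root bin, and
 * each bin points at the bin of the next shorter used prefix length.
 */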
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_router_ll_ops *ll_ops,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralst_pl[MLXSW_REG_XRALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
					  MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_router_ll_ops *ll_ops,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_router_ll_ops *ll_ops,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}

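/* LPM trees are shared: an already allocated tree with the same
 * protocol and an identical prefix-usage set is reference-counted and
 * reused; a new tree is only created when no such tree exists.
 */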
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];

	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 }};
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
			      tree_id);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

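/* E.g. routes from both the local table (255) and the default table
 * (253) are programmed into the same virtual router as the main table
 * (254).
 */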
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

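/* A virtual router is reclaimed lazily: only once it has no RIFs and
 * all of its unicast and multicast tables are empty.
 */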
static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		/* Re-derive the VR for each index; the loop otherwise keeps
		 * operating on the VR left over from the failing iteration.
		 */
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

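/* Must be called under RCU read lock, as the underlay netdev is looked
 * up by ifindex without taking a reference.
 */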
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return dev_get_by_index_rcu(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

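/* Demoting a decap entry turns it back into a plain trap entry, so that
 * matching packets are handed to the CPU instead of being decapsulated
 * in hardware; promotion is the reverse transition.
 */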
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	bool is_ipip_ul;

	mutex_lock(&mlxsw_sp->router->lock);
	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);

	return is_ipip_ul;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}

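/* On netdev registration, a tunnel is offloaded only if its type
 * supports offload and its underlay source address does not collide
 * with a tunnel that is already offloaded in the same underlay table;
 * on a collision, the conflicting offload is demoted and the new tunnel
 * is left unoffloaded.
 */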
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

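/* Program the loopback RIF backing the tunnel via the RITR register.
 * Only an IPv4 underlay is supported here; an IPv6 underlay is rejected
 * with -EAFNOSUPPORT.
 */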
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

1610 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1611 struct mlxsw_sp_rif *old_rif,
1612 struct mlxsw_sp_rif *new_rif);
1613 static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry,bool keep_encap,struct netlink_ext_ack * extack)1614 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1615 struct mlxsw_sp_ipip_entry *ipip_entry,
1616 bool keep_encap,
1617 struct netlink_ext_ack *extack)
1618 {
1619 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1620 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1621
1622 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1623 ipip_entry->ipipt,
1624 ipip_entry->ol_dev,
1625 extack);
1626 if (IS_ERR(new_lb_rif))
1627 return PTR_ERR(new_lb_rif);
1628 ipip_entry->ol_lb = new_lb_rif;
1629
1630 if (keep_encap)
1631 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1632 &new_lb_rif->common);
1633
1634 mlxsw_sp_rif_destroy(&old_lb_rif->common);
1635
1636 return 0;
1637 }
1638
1639 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1640 struct mlxsw_sp_rif *rif);
1641
1642 /**
1643 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1644 * @mlxsw_sp: mlxsw_sp instance.
1645 * @ipip_entry: IPIP entry.
1646 * @recreate_loopback: Whether to recreate the associated loopback RIF.
1647 * @keep_encap: Whether to update next hops that use the tunnel netdevice. Only
1648 * relevant when recreate_loopback is true.
1649 * @update_nexthops: Whether to update next hops while keeping the current
1650 * loopback RIF. Only relevant when recreate_loopback is false.
1651 * @extack: netlink extended ack for error reporting.
1652 *
1653 * Return: Non-zero value on failure.
1654 */
1655 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1656 struct mlxsw_sp_ipip_entry *ipip_entry,
1657 bool recreate_loopback,
1658 bool keep_encap,
1659 bool update_nexthops,
1660 struct netlink_ext_ack *extack)
1661 {
1662 int err;
1663
1664 /* RIFs can't be edited, so to update loopback, we need to destroy and
1665 * recreate it. That creates a window of opportunity where RALUE and
1666 * RATR registers end up referencing a RIF that's already gone. RATRs
1667 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1668 * of RALUE, demote the decap route back.
1669 */
1670 if (ipip_entry->decap_fib_entry)
1671 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1672
1673 if (recreate_loopback) {
1674 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1675 keep_encap, extack);
1676 if (err)
1677 return err;
1678 } else if (update_nexthops) {
1679 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1680 &ipip_entry->ol_lb->common);
1681 }
1682
1683 if (ipip_entry->ol_dev->flags & IFF_UP)
1684 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1685
1686 return 0;
1687 }
1688
1689 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1690 struct net_device *ol_dev,
1691 struct netlink_ext_ack *extack)
1692 {
1693 struct mlxsw_sp_ipip_entry *ipip_entry =
1694 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1695
1696 if (!ipip_entry)
1697 return 0;
1698
1699 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1700 true, false, false, extack);
1701 }
1702
1703 static int
1704 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1705 struct mlxsw_sp_ipip_entry *ipip_entry,
1706 struct net_device *ul_dev,
1707 bool *demote_this,
1708 struct netlink_ext_ack *extack)
1709 {
1710 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1711 enum mlxsw_sp_l3proto ul_proto;
1712 union mlxsw_sp_l3addr saddr;
1713
1714 /* Moving underlay to a different VRF might cause local address
1715 * conflict, and the conflicting tunnels need to be demoted.
1716 */
1717 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1718 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1719 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1720 saddr, ul_tb_id,
1721 ipip_entry)) {
1722 *demote_this = true;
1723 return 0;
1724 }
1725
1726 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1727 true, true, false, extack);
1728 }
1729
1730 static int
1731 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1732 struct mlxsw_sp_ipip_entry *ipip_entry,
1733 struct net_device *ul_dev)
1734 {
1735 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1736 false, false, true, NULL);
1737 }
1738
1739 static int
1740 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1741 struct mlxsw_sp_ipip_entry *ipip_entry,
1742 struct net_device *ul_dev)
1743 {
1744 /* When the underlay device is down, encapsulated packets are no
1745 * longer forwarded, but decap still works. So refresh next hops
1746 * without touching anything else.
1747 */
1748 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1749 false, false, true, NULL);
1750 }
1751
1752 static int
1753 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1754 struct net_device *ol_dev,
1755 struct netlink_ext_ack *extack)
1756 {
1757 const struct mlxsw_sp_ipip_ops *ipip_ops;
1758 struct mlxsw_sp_ipip_entry *ipip_entry;
1759 int err;
1760
1761 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1762 if (!ipip_entry)
1763 /* A change might make a tunnel eligible for offloading, but
1764 * that is currently not implemented. What falls to slow path
1765 * stays there.
1766 */
1767 return 0;
1768
1769 /* A change might make a tunnel not eligible for offloading. */
1770 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1771 ipip_entry->ipipt)) {
1772 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1773 return 0;
1774 }
1775
1776 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1777 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1778 return err;
1779 }
1780
1781 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1782 struct mlxsw_sp_ipip_entry *ipip_entry)
1783 {
1784 struct net_device *ol_dev = ipip_entry->ol_dev;
1785
1786 if (ol_dev->flags & IFF_UP)
1787 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1788 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1789 }
1790
1791 /* The configuration where several tunnels have the same local address in the
1792 * same underlay table needs special treatment in the HW. That is currently not
1793 * implemented in the driver. This function finds and demotes the first tunnel
1794 * with a given source address, except the one passed in as the argument
1795 * `except'.
1796 */
1797 bool
1798 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1799 enum mlxsw_sp_l3proto ul_proto,
1800 union mlxsw_sp_l3addr saddr,
1801 u32 ul_tb_id,
1802 const struct mlxsw_sp_ipip_entry *except)
1803 {
1804 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1805
1806 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1807 ipip_list_node) {
1808 if (ipip_entry != except &&
1809 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1810 ul_tb_id, ipip_entry)) {
1811 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1812 return true;
1813 }
1814 }
1815
1816 return false;
1817 }
1818
1819 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1820 struct net_device *ul_dev)
1821 {
1822 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1823
1824 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1825 ipip_list_node) {
1826 struct net_device *ol_dev = ipip_entry->ol_dev;
1827 struct net_device *ipip_ul_dev;
1828
1829 rcu_read_lock();
1830 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1831 rcu_read_unlock();
1832 if (ipip_ul_dev == ul_dev)
1833 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1834 }
1835 }
1836
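/* Entry point for netdevice events on IPIP overlay devices. Runs under the
 * router lock to serialize against other routing updates.
 */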
1837 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1838 struct net_device *ol_dev,
1839 unsigned long event,
1840 struct netdev_notifier_info *info)
1841 {
1842 struct netdev_notifier_changeupper_info *chup;
1843 struct netlink_ext_ack *extack;
1844 int err = 0;
1845
1846 mutex_lock(&mlxsw_sp->router->lock);
1847 switch (event) {
1848 case NETDEV_REGISTER:
1849 err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1850 break;
1851 case NETDEV_UNREGISTER:
1852 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1853 break;
1854 case NETDEV_UP:
1855 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1856 break;
1857 case NETDEV_DOWN:
1858 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1859 break;
1860 case NETDEV_CHANGEUPPER:
1861 chup = container_of(info, typeof(*chup), info);
1862 extack = info->extack;
1863 if (netif_is_l3_master(chup->upper_dev))
1864 err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1865 ol_dev,
1866 extack);
1867 break;
1868 case NETDEV_CHANGE:
1869 extack = info->extack;
1870 err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1871 ol_dev, extack);
1872 break;
1873 case NETDEV_CHANGEMTU:
1874 err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1875 break;
1876 }
1877 mutex_unlock(&mlxsw_sp->router->lock);
1878 return err;
1879 }
1880
1881 static int
1882 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1883 struct mlxsw_sp_ipip_entry *ipip_entry,
1884 struct net_device *ul_dev,
1885 bool *demote_this,
1886 unsigned long event,
1887 struct netdev_notifier_info *info)
1888 {
1889 struct netdev_notifier_changeupper_info *chup;
1890 struct netlink_ext_ack *extack;
1891
1892 switch (event) {
1893 case NETDEV_CHANGEUPPER:
1894 chup = container_of(info, typeof(*chup), info);
1895 extack = info->extack;
1896 if (netif_is_l3_master(chup->upper_dev))
1897 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1898 ipip_entry,
1899 ul_dev,
1900 demote_this,
1901 extack);
1902 break;
1903
1904 case NETDEV_UP:
1905 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1906 ul_dev);
1907 case NETDEV_DOWN:
1908 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1909 ipip_entry,
1910 ul_dev);
1911 }
1912 return 0;
1913 }
1914
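/* Entry point for netdevice events on IPIP underlay devices. A single
 * underlay device can serve several tunnels, so apply the event to every
 * matching IPIP entry.
 */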
1915 int
1916 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1917 struct net_device *ul_dev,
1918 unsigned long event,
1919 struct netdev_notifier_info *info)
1920 {
1921 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1922 int err = 0;
1923
1924 mutex_lock(&mlxsw_sp->router->lock);
1925 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1926 ul_dev,
1927 ipip_entry))) {
1928 struct mlxsw_sp_ipip_entry *prev;
1929 bool demote_this = false;
1930
1931 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1932 ul_dev, &demote_this,
1933 event, info);
1934 if (err) {
1935 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1936 ul_dev);
1937 break;
1938 }
1939
1940 if (demote_this) {
1941 if (list_is_first(&ipip_entry->ipip_list_node,
1942 &mlxsw_sp->router->ipip_list))
1943 prev = NULL;
1944 else
1945 /* This can't be cached from previous iteration,
1946 * because that entry could be gone now.
1947 */
1948 prev = list_prev_entry(ipip_entry,
1949 ipip_list_node);
1950 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1951 ipip_entry = prev;
1952 }
1953 }
1954 mutex_unlock(&mlxsw_sp->router->lock);
1955
1956 return err;
1957 }
1958
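/* Record the NVE decap configuration and, if a matching local route already
 * exists, promote it from a trap entry to an NVE decap entry.
 */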
1959 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1960 enum mlxsw_sp_l3proto ul_proto,
1961 const union mlxsw_sp_l3addr *ul_sip,
1962 u32 tunnel_index)
1963 {
1964 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1965 struct mlxsw_sp_router *router = mlxsw_sp->router;
1966 struct mlxsw_sp_fib_entry *fib_entry;
1967 int err = 0;
1968
1969 mutex_lock(&mlxsw_sp->router->lock);
1970
1971 if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1972 err = -EINVAL;
1973 goto out;
1974 }
1975
1976 router->nve_decap_config.ul_tb_id = ul_tb_id;
1977 router->nve_decap_config.tunnel_index = tunnel_index;
1978 router->nve_decap_config.ul_proto = ul_proto;
1979 router->nve_decap_config.ul_sip = *ul_sip;
1980 router->nve_decap_config.valid = true;
1981
1982 /* It is valid to create a tunnel with a local IP and only later
1983 * assign this IP address to a local interface.
1984 */
1985 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1986 ul_proto, ul_sip,
1987 type);
1988 if (!fib_entry)
1989 goto out;
1990
1991 fib_entry->decap.tunnel_index = tunnel_index;
1992 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1993
1994 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1995 if (err)
1996 goto err_fib_entry_update;
1997
1998 goto out;
1999
2000 err_fib_entry_update:
2001 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2002 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2003 out:
2004 mutex_unlock(&mlxsw_sp->router->lock);
2005 return err;
2006 }
2007
2008 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2009 enum mlxsw_sp_l3proto ul_proto,
2010 const union mlxsw_sp_l3addr *ul_sip)
2011 {
2012 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2013 struct mlxsw_sp_router *router = mlxsw_sp->router;
2014 struct mlxsw_sp_fib_entry *fib_entry;
2015
2016 mutex_lock(&mlxsw_sp->router->lock);
2017
2018 if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2019 goto out;
2020
2021 router->nve_decap_config.valid = false;
2022
2023 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2024 ul_proto, ul_sip,
2025 type);
2026 if (!fib_entry)
2027 goto out;
2028
2029 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2030 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2031 out:
2032 mutex_unlock(&mlxsw_sp->router->lock);
2033 }
2034
2035 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2036 u32 ul_tb_id,
2037 enum mlxsw_sp_l3proto ul_proto,
2038 const union mlxsw_sp_l3addr *ul_sip)
2039 {
2040 struct mlxsw_sp_router *router = mlxsw_sp->router;
2041
2042 return router->nve_decap_config.valid &&
2043 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2044 router->nve_decap_config.ul_proto == ul_proto &&
2045 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2046 sizeof(*ul_sip));
2047 }
2048
2049 struct mlxsw_sp_neigh_key {
2050 struct neighbour *n;
2051 };
2052
2053 struct mlxsw_sp_neigh_entry {
2054 struct list_head rif_list_node;
2055 struct rhash_head ht_node;
2056 struct mlxsw_sp_neigh_key key;
2057 u16 rif;
2058 bool connected;
2059 unsigned char ha[ETH_ALEN];
2060 struct list_head nexthop_list; /* list of nexthops using
2061 * this neigh entry
2062 */
2063 struct list_head nexthop_neighs_list_node;
2064 unsigned int counter_index;
2065 bool counter_valid;
2066 };
2067
2068 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2069 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2070 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2071 .key_len = sizeof(struct mlxsw_sp_neigh_key),
2072 };
2073
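/* Iterate over the neighbour entries of a RIF. Pass a NULL entry to get the
 * first one; NULL is returned past the last entry.
 */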
2074 struct mlxsw_sp_neigh_entry *
2075 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2076 struct mlxsw_sp_neigh_entry *neigh_entry)
2077 {
2078 if (!neigh_entry) {
2079 if (list_empty(&rif->neigh_list))
2080 return NULL;
2081 else
2082 return list_first_entry(&rif->neigh_list,
2083 typeof(*neigh_entry),
2084 rif_list_node);
2085 }
2086 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2087 return NULL;
2088 return list_next_entry(neigh_entry, rif_list_node);
2089 }
2090
2091 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2092 {
2093 return neigh_entry->key.n->tbl->family;
2094 }
2095
2096 unsigned char *
2097 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2098 {
2099 return neigh_entry->ha;
2100 }
2101
2102 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2103 {
2104 struct neighbour *n;
2105
2106 n = neigh_entry->key.n;
2107 return ntohl(*((__be32 *) n->primary_key));
2108 }
2109
2110 struct in6_addr *
2111 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2112 {
2113 struct neighbour *n;
2114
2115 n = neigh_entry->key.n;
2116 return (struct in6_addr *) &n->primary_key;
2117 }
2118
2119 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2120 struct mlxsw_sp_neigh_entry *neigh_entry,
2121 u64 *p_counter)
2122 {
2123 if (!neigh_entry->counter_valid)
2124 return -EINVAL;
2125
2126 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2127 p_counter, NULL);
2128 }
2129
2130 static struct mlxsw_sp_neigh_entry *
2131 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2132 u16 rif)
2133 {
2134 struct mlxsw_sp_neigh_entry *neigh_entry;
2135
2136 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2137 if (!neigh_entry)
2138 return NULL;
2139
2140 neigh_entry->key.n = n;
2141 neigh_entry->rif = rif;
2142 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2143
2144 return neigh_entry;
2145 }
2146
2147 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2148 {
2149 kfree(neigh_entry);
2150 }
2151
2152 static int
2153 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2154 struct mlxsw_sp_neigh_entry *neigh_entry)
2155 {
2156 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2157 &neigh_entry->ht_node,
2158 mlxsw_sp_neigh_ht_params);
2159 }
2160
2161 static void
2162 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2163 struct mlxsw_sp_neigh_entry *neigh_entry)
2164 {
2165 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2166 &neigh_entry->ht_node,
2167 mlxsw_sp_neigh_ht_params);
2168 }
2169
2170 static bool
2171 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2172 struct mlxsw_sp_neigh_entry *neigh_entry)
2173 {
2174 struct devlink *devlink;
2175 const char *table_name;
2176
2177 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2178 case AF_INET:
2179 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2180 break;
2181 case AF_INET6:
2182 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2183 break;
2184 default:
2185 WARN_ON(1);
2186 return false;
2187 }
2188
2189 devlink = priv_to_devlink(mlxsw_sp->core);
2190 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2191 }
2192
2193 static void
2194 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2195 struct mlxsw_sp_neigh_entry *neigh_entry)
2196 {
2197 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2198 return;
2199
2200 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2201 return;
2202
2203 neigh_entry->counter_valid = true;
2204 }
2205
2206 static void
2207 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2208 struct mlxsw_sp_neigh_entry *neigh_entry)
2209 {
2210 if (!neigh_entry->counter_valid)
2211 return;
2212 mlxsw_sp_flow_counter_free(mlxsw_sp,
2213 neigh_entry->counter_index);
2214 neigh_entry->counter_valid = false;
2215 }
2216
2217 static struct mlxsw_sp_neigh_entry *
2218 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2219 {
2220 struct mlxsw_sp_neigh_entry *neigh_entry;
2221 struct mlxsw_sp_rif *rif;
2222 int err;
2223
2224 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2225 if (!rif)
2226 return ERR_PTR(-EINVAL);
2227
2228 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2229 if (!neigh_entry)
2230 return ERR_PTR(-ENOMEM);
2231
2232 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2233 if (err)
2234 goto err_neigh_entry_insert;
2235
2236 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2237 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2238
2239 return neigh_entry;
2240
2241 err_neigh_entry_insert:
2242 mlxsw_sp_neigh_entry_free(neigh_entry);
2243 return ERR_PTR(err);
2244 }
2245
2246 static void
2247 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2248 struct mlxsw_sp_neigh_entry *neigh_entry)
2249 {
2250 list_del(&neigh_entry->rif_list_node);
2251 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2252 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2253 mlxsw_sp_neigh_entry_free(neigh_entry);
2254 }
2255
2256 static struct mlxsw_sp_neigh_entry *
2257 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2258 {
2259 struct mlxsw_sp_neigh_key key;
2260
2261 key.n = n;
2262 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2263 &key, mlxsw_sp_neigh_ht_params);
2264 }
2265
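/* Use the shorter of the ARP and ND DELAY_PROBE_TIME settings as the
 * polling interval for neighbour activity dumps.
 */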
2266 static void
2267 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2268 {
2269 unsigned long interval;
2270
2271 #if IS_ENABLED(CONFIG_IPV6)
2272 interval = min_t(unsigned long,
2273 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2274 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2275 #else
2276 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2277 #endif
2278 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2279 }
2280
2281 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2282 char *rauhtd_pl,
2283 int ent_index)
2284 {
2285 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2286 struct net_device *dev;
2287 struct neighbour *n;
2288 __be32 dipn;
2289 u32 dip;
2290 u16 rif;
2291
2292 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2293
2294 if (WARN_ON_ONCE(rif >= max_rifs))
2295 return;
2296 if (!mlxsw_sp->router->rifs[rif]) {
2297 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2298 return;
2299 }
2300
2301 dipn = htonl(dip);
2302 dev = mlxsw_sp->router->rifs[rif]->dev;
2303 n = neigh_lookup(&arp_tbl, &dipn, dev);
2304 if (!n)
2305 return;
2306
2307 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2308 neigh_event_send(n, NULL);
2309 neigh_release(n);
2310 }
2311
2312 #if IS_ENABLED(CONFIG_IPV6)
2313 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2314 char *rauhtd_pl,
2315 int rec_index)
2316 {
2317 struct net_device *dev;
2318 struct neighbour *n;
2319 struct in6_addr dip;
2320 u16 rif;
2321
2322 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2323 (char *) &dip);
2324
2325 if (!mlxsw_sp->router->rifs[rif]) {
2326 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2327 return;
2328 }
2329
2330 dev = mlxsw_sp->router->rifs[rif]->dev;
2331 n = neigh_lookup(&nd_tbl, &dip, dev);
2332 if (!n)
2333 return;
2334
2335 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2336 neigh_event_send(n, NULL);
2337 neigh_release(n);
2338 }
2339 #else
2340 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2341 char *rauhtd_pl,
2342 int rec_index)
2343 {
2344 }
2345 #endif
2346
2347 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2348 char *rauhtd_pl,
2349 int rec_index)
2350 {
2351 u8 num_entries;
2352 int i;
2353
2354 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2355 rec_index);
2356 /* Hardware starts counting at 0, so add 1. */
2357 num_entries++;
2358
2359 /* Each record consists of several neighbour entries. */
2360 for (i = 0; i < num_entries; i++) {
2361 int ent_index;
2362
2363 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2364 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2365 ent_index);
2366 }
2368 }
2369
2370 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2371 char *rauhtd_pl,
2372 int rec_index)
2373 {
2374 /* One record contains one entry. */
2375 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2376 rec_index);
2377 }
2378
2379 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2380 char *rauhtd_pl, int rec_index)
2381 {
2382 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2383 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2384 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2385 rec_index);
2386 break;
2387 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2388 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2389 rec_index);
2390 break;
2391 }
2392 }
2393
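/* The dump is considered full when the maximum number of records was
 * returned and the last record itself is full: an IPv6 record holds a
 * single entry, while an IPv4 record is full once it holds
 * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries.
 */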
2394 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2395 {
2396 u8 num_rec, last_rec_index, num_entries;
2397
2398 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2399 last_rec_index = num_rec - 1;
2400
2401 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2402 return false;
2403 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2404 MLXSW_REG_RAUHTD_TYPE_IPV6)
2405 return true;
2406
2407 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2408 last_rec_index);
2409 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2410 return true;
2411 return false;
2412 }
2413
2414 static int
2415 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2416 char *rauhtd_pl,
2417 enum mlxsw_reg_rauhtd_type type)
2418 {
2419 int i, num_rec;
2420 int err;
2421
2422 /* Ensure the RIF we read from the device does not change mid-dump. */
2423 mutex_lock(&mlxsw_sp->router->lock);
2424 do {
2425 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2426 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2427 rauhtd_pl);
2428 if (err) {
2429 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2430 break;
2431 }
2432 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2433 for (i = 0; i < num_rec; i++)
2434 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2435 i);
2436 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2437 mutex_unlock(&mlxsw_sp->router->lock);
2438
2439 return err;
2440 }
2441
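/* Dump neighbour activity for both protocols, reusing a single RAUHTD
 * payload buffer for the IPv4 pass and then the IPv6 pass.
 */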
2442 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2443 {
2444 enum mlxsw_reg_rauhtd_type type;
2445 char *rauhtd_pl;
2446 int err;
2447
2448 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2449 if (!rauhtd_pl)
2450 return -ENOMEM;
2451
2452 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2453 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2454 if (err)
2455 goto out;
2456
2457 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2458 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2459 out:
2460 kfree(rauhtd_pl);
2461 return err;
2462 }
2463
2464 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2465 {
2466 struct mlxsw_sp_neigh_entry *neigh_entry;
2467
2468 mutex_lock(&mlxsw_sp->router->lock);
2469 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2470 nexthop_neighs_list_node)
2471 /* If this neigh has nexthops, make the kernel think it is
2472 * active regardless of the traffic.
2473 */
2474 neigh_event_send(neigh_entry->key.n, NULL);
2475 mutex_unlock(&mlxsw_sp->router->lock);
2476 }
2477
2478 static void
2479 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2480 {
2481 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2482
2483 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2484 msecs_to_jiffies(interval));
2485 }
2486
2487 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2488 {
2489 struct mlxsw_sp_router *router;
2490 int err;
2491
2492 router = container_of(work, struct mlxsw_sp_router,
2493 neighs_update.dw.work);
2494 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2495 if (err)
2496 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2497
2498 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2499
2500 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2501 }
2502
2503 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2504 {
2505 struct mlxsw_sp_neigh_entry *neigh_entry;
2506 struct mlxsw_sp_router *router;
2507
2508 router = container_of(work, struct mlxsw_sp_router,
2509 nexthop_probe_dw.work);
2510 /* Iterate over nexthop neighbours, find the unresolved ones and
2511 * send ARP probes to them. This solves a chicken-and-egg problem:
2512 * a nexthop is not offloaded until its neighbour is resolved, but
2513 * the neighbour may never get resolved if traffic already flows in
2514 * HW via a different nexthop.
2515 */
2516 mutex_lock(&router->lock);
2517 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2518 nexthop_neighs_list_node)
2519 if (!neigh_entry->connected)
2520 neigh_event_send(neigh_entry->key.n, NULL);
2521 mutex_unlock(&router->lock);
2522
2523 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2524 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2525 }
2526
2527 static void
2528 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2529 struct mlxsw_sp_neigh_entry *neigh_entry,
2530 bool removing, bool dead);
2531
2532 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2533 {
2534 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2535 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2536 }
2537
2538 static int
2539 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2540 struct mlxsw_sp_neigh_entry *neigh_entry,
2541 enum mlxsw_reg_rauht_op op)
2542 {
2543 struct neighbour *n = neigh_entry->key.n;
2544 u32 dip = ntohl(*((__be32 *) n->primary_key));
2545 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2546
2547 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2548 dip);
2549 if (neigh_entry->counter_valid)
2550 mlxsw_reg_rauht_pack_counter(rauht_pl,
2551 neigh_entry->counter_index);
2552 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2553 }
2554
2555 static int
2556 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2557 struct mlxsw_sp_neigh_entry *neigh_entry,
2558 enum mlxsw_reg_rauht_op op)
2559 {
2560 struct neighbour *n = neigh_entry->key.n;
2561 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2562 const char *dip = n->primary_key;
2563
2564 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2565 dip);
2566 if (neigh_entry->counter_valid)
2567 mlxsw_reg_rauht_pack_counter(rauht_pl,
2568 neigh_entry->counter_index);
2569 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2570 }
2571
2572 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2573 {
2574 struct neighbour *n = neigh_entry->key.n;
2575
2576 /* Packets with a link-local destination address are trapped
2577 * after LPM lookup and never reach the neighbour table, so
2578 * there is no need to program such neighbours to the device.
2579 */
2580 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2581 IPV6_ADDR_LINKLOCAL)
2582 return true;
2583 return false;
2584 }
2585
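/* Program the neighbour to the device via RAUHT, or remove it, and reflect
 * the result to the kernel through the NTF_OFFLOADED flag.
 */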
2586 static void
2587 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2588 struct mlxsw_sp_neigh_entry *neigh_entry,
2589 bool adding)
2590 {
2591 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2592 int err;
2593
2594 if (!adding && !neigh_entry->connected)
2595 return;
2596 neigh_entry->connected = adding;
2597 if (neigh_entry->key.n->tbl->family == AF_INET) {
2598 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2599 op);
2600 if (err)
2601 return;
2602 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2603 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2604 return;
2605 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2606 op);
2607 if (err)
2608 return;
2609 } else {
2610 WARN_ON_ONCE(1);
2611 return;
2612 }
2613
2614 if (adding)
2615 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2616 else
2617 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2618 }
2619
2620 void
2621 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2622 struct mlxsw_sp_neigh_entry *neigh_entry,
2623 bool adding)
2624 {
2625 if (adding)
2626 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2627 else
2628 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2629 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2630 }
2631
2632 struct mlxsw_sp_netevent_work {
2633 struct work_struct work;
2634 struct mlxsw_sp *mlxsw_sp;
2635 struct neighbour *n;
2636 };
2637
2638 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2639 {
2640 struct mlxsw_sp_netevent_work *net_work =
2641 container_of(work, struct mlxsw_sp_netevent_work, work);
2642 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2643 struct mlxsw_sp_neigh_entry *neigh_entry;
2644 struct neighbour *n = net_work->n;
2645 unsigned char ha[ETH_ALEN];
2646 bool entry_connected;
2647 u8 nud_state, dead;
2648
2649 /* If these parameters are changed after we release the lock,
2650 * then we are guaranteed to receive another event letting us
2651 * know about it.
2652 */
2653 read_lock_bh(&n->lock);
2654 memcpy(ha, n->ha, ETH_ALEN);
2655 nud_state = n->nud_state;
2656 dead = n->dead;
2657 read_unlock_bh(&n->lock);
2658
2659 mutex_lock(&mlxsw_sp->router->lock);
2660 mlxsw_sp_span_respin(mlxsw_sp);
2661
2662 entry_connected = nud_state & NUD_VALID && !dead;
2663 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2664 if (!entry_connected && !neigh_entry)
2665 goto out;
2666 if (!neigh_entry) {
2667 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2668 if (IS_ERR(neigh_entry))
2669 goto out;
2670 }
2671
2672 if (neigh_entry->connected && entry_connected &&
2673 !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2674 goto out;
2675
2676 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2677 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2678 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2679 dead);
2680
2681 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2682 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2683
2684 out:
2685 mutex_unlock(&mlxsw_sp->router->lock);
2686 neigh_release(n);
2687 kfree(net_work);
2688 }
2689
2690 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2691
2692 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2693 {
2694 struct mlxsw_sp_netevent_work *net_work =
2695 container_of(work, struct mlxsw_sp_netevent_work, work);
2696 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2697
2698 mlxsw_sp_mp_hash_init(mlxsw_sp);
2699 kfree(net_work);
2700 }
2701
2702 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2703
2704 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2705 {
2706 struct mlxsw_sp_netevent_work *net_work =
2707 container_of(work, struct mlxsw_sp_netevent_work, work);
2708 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2709
2710 __mlxsw_sp_router_init(mlxsw_sp);
2711 kfree(net_work);
2712 }
2713
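/* Called from atomic notifier context, hence the GFP_ATOMIC allocation and
 * the deferral of the actual handling to a work item.
 */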
2714 static int mlxsw_sp_router_schedule_work(struct net *net,
2715 struct notifier_block *nb,
2716 void (*cb)(struct work_struct *))
2717 {
2718 struct mlxsw_sp_netevent_work *net_work;
2719 struct mlxsw_sp_router *router;
2720
2721 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2722 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2723 return NOTIFY_DONE;
2724
2725 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2726 if (!net_work)
2727 return NOTIFY_BAD;
2728
2729 INIT_WORK(&net_work->work, cb);
2730 net_work->mlxsw_sp = router->mlxsw_sp;
2731 mlxsw_core_schedule_work(&net_work->work);
2732 return NOTIFY_DONE;
2733 }
2734
2735 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2736 unsigned long event, void *ptr)
2737 {
2738 struct mlxsw_sp_netevent_work *net_work;
2739 struct mlxsw_sp_port *mlxsw_sp_port;
2740 struct mlxsw_sp *mlxsw_sp;
2741 unsigned long interval;
2742 struct neigh_parms *p;
2743 struct neighbour *n;
2744
2745 switch (event) {
2746 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2747 p = ptr;
2748
2749 /* We don't care about changes in the default table. */
2750 if (!p->dev || (p->tbl->family != AF_INET &&
2751 p->tbl->family != AF_INET6))
2752 return NOTIFY_DONE;
2753
2754 /* We are in atomic context and can't take RTNL mutex,
2755 * so use RCU variant to walk the device chain.
2756 */
2757 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2758 if (!mlxsw_sp_port)
2759 return NOTIFY_DONE;
2760
2761 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2762 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2763 mlxsw_sp->router->neighs_update.interval = interval;
2764
2765 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2766 break;
2767 case NETEVENT_NEIGH_UPDATE:
2768 n = ptr;
2769
2770 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2771 return NOTIFY_DONE;
2772
2773 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2774 if (!mlxsw_sp_port)
2775 return NOTIFY_DONE;
2776
2777 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2778 if (!net_work) {
2779 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2780 return NOTIFY_BAD;
2781 }
2782
2783 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2784 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2785 net_work->n = n;
2786
2787 /* Take a reference to ensure the neighbour won't be
2788 * destroyed until we drop the reference in the delayed
2789 * work.
2790 */
2791 neigh_clone(n);
2792 mlxsw_core_schedule_work(&net_work->work);
2793 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2794 break;
2795 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2796 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2797 return mlxsw_sp_router_schedule_work(ptr, nb,
2798 mlxsw_sp_router_mp_hash_event_work);
2799
2800 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2801 return mlxsw_sp_router_schedule_work(ptr, nb,
2802 mlxsw_sp_router_update_priority_work);
2803 }
2804
2805 return NOTIFY_DONE;
2806 }
2807
2808 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2809 {
2810 int err;
2811
2812 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2813 &mlxsw_sp_neigh_ht_params);
2814 if (err)
2815 return err;
2816
2817 /* Initialize the polling interval according to the default
2818 * table.
2819 */
2820 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2821
2822 /* Create the delayed works for neighbour activity update and nexthop probing */
2823 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2824 mlxsw_sp_router_neighs_update_work);
2825 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2826 mlxsw_sp_router_probe_unresolved_nexthops);
2827 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2828 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2829 return 0;
2830 }
2831
2832 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2833 {
2834 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2835 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2836 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2837 }
2838
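/* The RIF is going away; unprogram all neighbour entries that use it from
 * the device and destroy them.
 */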
2839 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2840 struct mlxsw_sp_rif *rif)
2841 {
2842 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2843
2844 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2845 rif_list_node) {
2846 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2847 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2848 }
2849 }
2850
2851 enum mlxsw_sp_nexthop_type {
2852 MLXSW_SP_NEXTHOP_TYPE_ETH,
2853 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2854 };
2855
2856 enum mlxsw_sp_nexthop_action {
2857 /* Nexthop forwards packets to an egress RIF */
2858 MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2859 /* Nexthop discards packets */
2860 MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2861 /* Nexthop traps packets */
2862 MLXSW_SP_NEXTHOP_ACTION_TRAP,
2863 };
2864
2865 struct mlxsw_sp_nexthop_key {
2866 struct fib_nh *fib_nh;
2867 };
2868
2869 struct mlxsw_sp_nexthop {
2870 struct list_head neigh_list_node; /* member of neigh entry list */
2871 struct list_head rif_list_node;
2872 struct list_head router_list_node;
2873 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2874 * this nexthop belongs to
2875 */
2876 struct rhash_head ht_node;
2877 struct neigh_table *neigh_tbl;
2878 struct mlxsw_sp_nexthop_key key;
2879 unsigned char gw_addr[sizeof(struct in6_addr)];
2880 int ifindex;
2881 int nh_weight;
2882 int norm_nh_weight;
2883 int num_adj_entries;
2884 struct mlxsw_sp_rif *rif;
2885 u8 should_offload:1, /* set indicates this nexthop should be written
2886 * to the adjacency table.
2887 */
2888 offloaded:1, /* set indicates this nexthop was written to the
2889 * adjacency table.
2890 */
2891 update:1; /* set indicates this nexthop should be updated in the
2892 * adjacency table (e.g., its MAC changed).
2893 */
2894 enum mlxsw_sp_nexthop_action action;
2895 enum mlxsw_sp_nexthop_type type;
2896 union {
2897 struct mlxsw_sp_neigh_entry *neigh_entry;
2898 struct mlxsw_sp_ipip_entry *ipip_entry;
2899 };
2900 unsigned int counter_index;
2901 bool counter_valid;
2902 };
2903
2904 enum mlxsw_sp_nexthop_group_type {
2905 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2906 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2907 MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2908 };
2909
2910 struct mlxsw_sp_nexthop_group_info {
2911 struct mlxsw_sp_nexthop_group *nh_grp;
2912 u32 adj_index;
2913 u16 ecmp_size;
2914 u16 count;
2915 int sum_norm_weight;
2916 u8 adj_index_valid:1,
2917 gateway:1, /* routes using the group use a gateway */
2918 is_resilient:1;
2919 struct list_head list; /* member in nh_res_grp_list */
2920 struct mlxsw_sp_nexthop nexthops[];
2921 #define nh_rif nexthops[0].rif
2922 };
2923
2924 struct mlxsw_sp_nexthop_group_vr_key {
2925 u16 vr_id;
2926 enum mlxsw_sp_l3proto proto;
2927 };
2928
2929 struct mlxsw_sp_nexthop_group_vr_entry {
2930 struct list_head list; /* member in vr_list */
2931 struct rhash_head ht_node; /* member in vr_ht */
2932 refcount_t ref_count;
2933 struct mlxsw_sp_nexthop_group_vr_key key;
2934 };
2935
2936 struct mlxsw_sp_nexthop_group {
2937 struct rhash_head ht_node;
2938 struct list_head fib_list; /* list of fib entries that use this group */
2939 union {
2940 struct {
2941 struct fib_info *fi;
2942 } ipv4;
2943 struct {
2944 u32 id;
2945 } obj;
2946 };
2947 struct mlxsw_sp_nexthop_group_info *nhgi;
2948 struct list_head vr_list;
2949 struct rhashtable vr_ht;
2950 enum mlxsw_sp_nexthop_group_type type;
2951 bool can_destroy;
2952 };
2953
2954 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2955 struct mlxsw_sp_nexthop *nh)
2956 {
2957 struct devlink *devlink;
2958
2959 devlink = priv_to_devlink(mlxsw_sp->core);
2960 if (!devlink_dpipe_table_counter_enabled(devlink,
2961 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2962 return;
2963
2964 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2965 return;
2966
2967 nh->counter_valid = true;
2968 }
2969
2970 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2971 struct mlxsw_sp_nexthop *nh)
2972 {
2973 if (!nh->counter_valid)
2974 return;
2975 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2976 nh->counter_valid = false;
2977 }
2978
2979 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2980 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2981 {
2982 if (!nh->counter_valid)
2983 return -EINVAL;
2984
2985 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2986 p_counter, NULL);
2987 }
2988
2989 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2990 struct mlxsw_sp_nexthop *nh)
2991 {
2992 if (!nh) {
2993 if (list_empty(&router->nexthop_list))
2994 return NULL;
2995 else
2996 return list_first_entry(&router->nexthop_list,
2997 typeof(*nh), router_list_node);
2998 }
2999 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3000 return NULL;
3001 return list_next_entry(nh, router_list_node);
3002 }
3003
3004 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3005 {
3006 return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3007 }
3008
3009 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3010 {
3011 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3012 !mlxsw_sp_nexthop_is_forward(nh))
3013 return NULL;
3014 return nh->neigh_entry->ha;
3015 }
3016
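/* Report the adjacency index, the group size and this nexthop's hash index
 * within the group. The hash index is the sum of the adjacency entries of
 * the offloaded nexthops preceding it.
 */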
3017 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3018 u32 *p_adj_size, u32 *p_adj_hash_index)
3019 {
3020 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3021 u32 adj_hash_index = 0;
3022 int i;
3023
3024 if (!nh->offloaded || !nhgi->adj_index_valid)
3025 return -EINVAL;
3026
3027 *p_adj_index = nhgi->adj_index;
3028 *p_adj_size = nhgi->ecmp_size;
3029
3030 for (i = 0; i < nhgi->count; i++) {
3031 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3032
3033 if (nh_iter == nh)
3034 break;
3035 if (nh_iter->offloaded)
3036 adj_hash_index += nh_iter->num_adj_entries;
3037 }
3038
3039 *p_adj_hash_index = adj_hash_index;
3040 return 0;
3041 }
3042
3043 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3044 {
3045 return nh->rif;
3046 }
3047
3048 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3049 {
3050 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3051 int i;
3052
3053 for (i = 0; i < nhgi->count; i++) {
3054 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3055
3056 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3057 return true;
3058 }
3059 return false;
3060 }
3061
3062 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3063 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3064 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3065 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3066 .automatic_shrinking = true,
3067 };
3068
3069 static struct mlxsw_sp_nexthop_group_vr_entry *
3070 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3071 const struct mlxsw_sp_fib *fib)
3072 {
3073 struct mlxsw_sp_nexthop_group_vr_key key;
3074
3075 memset(&key, 0, sizeof(key));
3076 key.vr_id = fib->vr->id;
3077 key.proto = fib->proto;
3078 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3079 mlxsw_sp_nexthop_group_vr_ht_params);
3080 }
3081
3082 static int
3083 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3084 const struct mlxsw_sp_fib *fib)
3085 {
3086 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3087 int err;
3088
3089 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3090 if (!vr_entry)
3091 return -ENOMEM;
3092
3093 vr_entry->key.vr_id = fib->vr->id;
3094 vr_entry->key.proto = fib->proto;
3095 refcount_set(&vr_entry->ref_count, 1);
3096
3097 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3098 mlxsw_sp_nexthop_group_vr_ht_params);
3099 if (err)
3100 goto err_hashtable_insert;
3101
3102 list_add(&vr_entry->list, &nh_grp->vr_list);
3103
3104 return 0;
3105
3106 err_hashtable_insert:
3107 kfree(vr_entry);
3108 return err;
3109 }
3110
3111 static void
3112 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3113 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3114 {
3115 list_del(&vr_entry->list);
3116 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3117 mlxsw_sp_nexthop_group_vr_ht_params);
3118 kfree(vr_entry);
3119 }
3120
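/* Bind the nexthop group to the virtual router of the given FIB. Bindings
 * are reference counted, so linking the same VR again reuses the entry.
 */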
3121 static int
3122 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3123 const struct mlxsw_sp_fib *fib)
3124 {
3125 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3126
3127 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3128 if (vr_entry) {
3129 refcount_inc(&vr_entry->ref_count);
3130 return 0;
3131 }
3132
3133 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3134 }
3135
3136 static void
3137 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3138 const struct mlxsw_sp_fib *fib)
3139 {
3140 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3141
3142 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3143 if (WARN_ON_ONCE(!vr_entry))
3144 return;
3145
3146 if (!refcount_dec_and_test(&vr_entry->ref_count))
3147 return;
3148
3149 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3150 }
3151
3152 struct mlxsw_sp_nexthop_group_cmp_arg {
3153 enum mlxsw_sp_nexthop_group_type type;
3154 union {
3155 struct fib_info *fi;
3156 struct mlxsw_sp_fib6_entry *fib6_entry;
3157 u32 id;
3158 };
3159 };
3160
3161 static bool
3162 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3163 const struct in6_addr *gw, int ifindex,
3164 int weight)
3165 {
3166 int i;
3167
3168 for (i = 0; i < nh_grp->nhgi->count; i++) {
3169 const struct mlxsw_sp_nexthop *nh;
3170
3171 nh = &nh_grp->nhgi->nexthops[i];
3172 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3173 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3174 return true;
3175 }
3176
3177 return false;
3178 }
3179
3180 static bool
3181 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3182 const struct mlxsw_sp_fib6_entry *fib6_entry)
3183 {
3184 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3185
3186 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3187 return false;
3188
3189 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3190 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3191 struct in6_addr *gw;
3192 int ifindex, weight;
3193
3194 ifindex = fib6_nh->fib_nh_dev->ifindex;
3195 weight = fib6_nh->fib_nh_weight;
3196 gw = &fib6_nh->fib_nh_gw6;
3197 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3198 weight))
3199 return false;
3200 }
3201
3202 return true;
3203 }
3204
3205 static int
3206 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3207 {
3208 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3209 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3210
3211 if (nh_grp->type != cmp_arg->type)
3212 return 1;
3213
3214 switch (cmp_arg->type) {
3215 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3216 return cmp_arg->fi != nh_grp->ipv4.fi;
3217 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3218 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3219 cmp_arg->fib6_entry);
3220 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3221 return cmp_arg->id != nh_grp->obj.id;
3222 default:
3223 WARN_ON(1);
3224 return 1;
3225 }
3226 }
3227
3228 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3229 {
3230 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3231 const struct mlxsw_sp_nexthop *nh;
3232 struct fib_info *fi;
3233 unsigned int val;
3234 int i;
3235
3236 switch (nh_grp->type) {
3237 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3238 fi = nh_grp->ipv4.fi;
3239 return jhash(&fi, sizeof(fi), seed);
3240 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3241 val = nh_grp->nhgi->count;
3242 for (i = 0; i < nh_grp->nhgi->count; i++) {
3243 nh = &nh_grp->nhgi->nexthops[i];
3244 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3245 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3246 }
3247 return jhash(&val, sizeof(val), seed);
3248 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3249 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3250 default:
3251 WARN_ON(1);
3252 return 0;
3253 }
3254 }
3255
3256 static u32
3257 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3258 {
3259 unsigned int val = fib6_entry->nrt6;
3260 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3261
3262 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3263 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3264 struct net_device *dev = fib6_nh->fib_nh_dev;
3265 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3266
3267 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3268 val ^= jhash(gw, sizeof(*gw), seed);
3269 }
3270
3271 return jhash(&val, sizeof(val), seed);
3272 }
3273
3274 static u32
3275 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3276 {
3277 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3278
3279 switch (cmp_arg->type) {
3280 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3281 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3282 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3283 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3284 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3285 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3286 default:
3287 WARN_ON(1);
3288 return 0;
3289 }
3290 }
3291
3292 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3293 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3294 .hashfn = mlxsw_sp_nexthop_group_hash,
3295 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3296 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3297 };
3298
3299 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3300 struct mlxsw_sp_nexthop_group *nh_grp)
3301 {
3302 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3303 !nh_grp->nhgi->gateway)
3304 return 0;
3305
3306 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3307 &nh_grp->ht_node,
3308 mlxsw_sp_nexthop_group_ht_params);
3309 }
3310
3311 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3312 struct mlxsw_sp_nexthop_group *nh_grp)
3313 {
3314 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3315 !nh_grp->nhgi->gateway)
3316 return;
3317
3318 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3319 &nh_grp->ht_node,
3320 mlxsw_sp_nexthop_group_ht_params);
3321 }
3322
3323 static struct mlxsw_sp_nexthop_group *
3324 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3325 struct fib_info *fi)
3326 {
3327 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3328
3329 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3330 cmp_arg.fi = fi;
3331 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3332 &cmp_arg,
3333 mlxsw_sp_nexthop_group_ht_params);
3334 }
3335
3336 static struct mlxsw_sp_nexthop_group *
3337 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3338 struct mlxsw_sp_fib6_entry *fib6_entry)
3339 {
3340 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3341
3342 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3343 cmp_arg.fib6_entry = fib6_entry;
3344 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3345 &cmp_arg,
3346 mlxsw_sp_nexthop_group_ht_params);
3347 }
3348
3349 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3350 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3351 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3352 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3353 };
3354
3355 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3356 struct mlxsw_sp_nexthop *nh)
3357 {
3358 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3359 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3360 }
3361
3362 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3363 struct mlxsw_sp_nexthop *nh)
3364 {
3365 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3366 mlxsw_sp_nexthop_ht_params);
3367 }
3368
3369 static struct mlxsw_sp_nexthop *
3370 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3371 struct mlxsw_sp_nexthop_key key)
3372 {
3373 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3374 mlxsw_sp_nexthop_ht_params);
3375 }
3376
3377 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3378 enum mlxsw_sp_l3proto proto,
3379 u16 vr_id,
3380 u32 adj_index, u16 ecmp_size,
3381 u32 new_adj_index,
3382 u16 new_ecmp_size)
3383 {
3384 char raleu_pl[MLXSW_REG_RALEU_LEN];
3385
3386 mlxsw_reg_raleu_pack(raleu_pl,
3387 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3388 adj_index, ecmp_size, new_adj_index,
3389 new_ecmp_size);
3390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3391 }
3392
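/* When a group is given a new adjacency range, every FIB entry that still
 * points at the old range must be rewritten. The RALEU register does this
 * in bulk per virtual router, so walk the group's VR list and, on failure,
 * roll the already-updated VRs back to the old index and size.
 */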
3393 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3394 struct mlxsw_sp_nexthop_group *nh_grp,
3395 u32 old_adj_index, u16 old_ecmp_size)
3396 {
3397 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3398 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3399 int err;
3400
3401 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3402 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3403 vr_entry->key.proto,
3404 vr_entry->key.vr_id,
3405 old_adj_index,
3406 old_ecmp_size,
3407 nhgi->adj_index,
3408 nhgi->ecmp_size);
3409 if (err)
3410 goto err_mass_update_vr;
3411 }
3412 return 0;
3413
3414 err_mass_update_vr:
3415 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3416 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3417 vr_entry->key.vr_id,
3418 nhgi->adj_index,
3419 nhgi->ecmp_size,
3420 old_adj_index, old_ecmp_size);
3421 return err;
3422 }
3423
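/* Write one adjacency entry via the RATR register. A non-'force' write is
 * conditioned on the entry's hardware activity state, which resilient
 * groups rely on so that buckets carrying traffic are not silently
 * overwritten. Note that discard and trap nexthops are still programmed
 * as Ethernet entries, only with a trap action instead of a destination
 * MAC.
 */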
3424 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3425 u32 adj_index,
3426 struct mlxsw_sp_nexthop *nh,
3427 bool force, char *ratr_pl)
3428 {
3429 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3430 enum mlxsw_reg_ratr_op op;
3431 u16 rif_index;
3432
3433 rif_index = nh->rif ? nh->rif->rif_index :
3434 mlxsw_sp->router->lb_rif_index;
3435 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3436 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3437 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3438 adj_index, rif_index);
3439 switch (nh->action) {
3440 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3441 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3442 break;
3443 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3444 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3445 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3446 break;
3447 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3448 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3449 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3450 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3451 break;
3452 default:
3453 WARN_ON_ONCE(1);
3454 return -EINVAL;
3455 }
3456 if (nh->counter_valid)
3457 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3458 else
3459 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3460
3461 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3462 }
3463
3464 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3465 struct mlxsw_sp_nexthop *nh, bool force,
3466 char *ratr_pl)
3467 {
3468 int i;
3469
3470 for (i = 0; i < nh->num_adj_entries; i++) {
3471 int err;
3472
3473 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3474 nh, force, ratr_pl);
3475 if (err)
3476 return err;
3477 }
3478
3479 return 0;
3480 }
3481
3482 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3483 u32 adj_index,
3484 struct mlxsw_sp_nexthop *nh,
3485 bool force, char *ratr_pl)
3486 {
3487 const struct mlxsw_sp_ipip_ops *ipip_ops;
3488
3489 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3490 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3491 force, ratr_pl);
3492 }
3493
3494 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3495 u32 adj_index,
3496 struct mlxsw_sp_nexthop *nh, bool force,
3497 char *ratr_pl)
3498 {
3499 int i;
3500
3501 for (i = 0; i < nh->num_adj_entries; i++) {
3502 int err;
3503
3504 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3505 nh, force, ratr_pl);
3506 if (err)
3507 return err;
3508 }
3509
3510 return 0;
3511 }
3512
3513 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3514 struct mlxsw_sp_nexthop *nh, bool force,
3515 char *ratr_pl)
3516 {
3517 /* When action is discard or trap, the nexthop must be
3518 * programmed as an Ethernet nexthop.
3519 */
3520 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3521 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3522 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3523 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3524 force, ratr_pl);
3525 else
3526 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3527 force, ratr_pl);
3528 }
3529
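/* Program the group's adjacency range. Each nexthop occupies
 * 'num_adj_entries' consecutive entries starting at the group's base
 * index; entries are only rewritten when the nexthop is marked for update
 * or when the whole range was reallocated.
 */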
3530 static int
3531 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3532 struct mlxsw_sp_nexthop_group_info *nhgi,
3533 bool reallocate)
3534 {
3535 char ratr_pl[MLXSW_REG_RATR_LEN];
3536 u32 adj_index = nhgi->adj_index; /* base */
3537 struct mlxsw_sp_nexthop *nh;
3538 int i;
3539
3540 for (i = 0; i < nhgi->count; i++) {
3541 nh = &nhgi->nexthops[i];
3542
3543 if (!nh->should_offload) {
3544 nh->offloaded = 0;
3545 continue;
3546 }
3547
3548 if (nh->update || reallocate) {
3549 int err = 0;
3550
3551 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3552 true, ratr_pl);
3553 if (err)
3554 return err;
3555 nh->update = 0;
3556 nh->offloaded = 1;
3557 }
3558 adj_index += nh->num_adj_entries;
3559 }
3560 return 0;
3561 }
3562
3563 static int
3564 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3565 struct mlxsw_sp_nexthop_group *nh_grp)
3566 {
3567 struct mlxsw_sp_fib_entry *fib_entry;
3568 int err;
3569
3570 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3571 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3572 if (err)
3573 return err;
3574 }
3575 return 0;
3576 }
3577
3578 struct mlxsw_sp_adj_grp_size_range {
3579 u16 start; /* Inclusive */
3580 u16 end; /* Inclusive */
3581 };
3582
3583 /* Ordered by range start value */
3584 static const struct mlxsw_sp_adj_grp_size_range
3585 mlxsw_sp1_adj_grp_size_ranges[] = {
3586 { .start = 1, .end = 64 },
3587 { .start = 512, .end = 512 },
3588 { .start = 1024, .end = 1024 },
3589 { .start = 2048, .end = 2048 },
3590 { .start = 4096, .end = 4096 },
3591 };
3592
3593 /* Ordered by range start value */
3594 static const struct mlxsw_sp_adj_grp_size_range
3595 mlxsw_sp2_adj_grp_size_ranges[] = {
3596 { .start = 1, .end = 128 },
3597 { .start = 256, .end = 256 },
3598 { .start = 512, .end = 512 },
3599 { .start = 1024, .end = 1024 },
3600 { .start = 2048, .end = 2048 },
3601 { .start = 4096, .end = 4096 },
3602 };
3603
3604 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3605 u16 *p_adj_grp_size)
3606 {
3607 int i;
3608
3609 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3610 const struct mlxsw_sp_adj_grp_size_range *size_range;
3611
3612 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3613
3614 if (*p_adj_grp_size >= size_range->start &&
3615 *p_adj_grp_size <= size_range->end)
3616 return;
3617
3618 if (*p_adj_grp_size <= size_range->end) {
3619 *p_adj_grp_size = size_range->end;
3620 return;
3621 }
3622 }
3623 }
3624
3625 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3626 u16 *p_adj_grp_size,
3627 unsigned int alloc_size)
3628 {
3629 int i;
3630
3631 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3632 const struct mlxsw_sp_adj_grp_size_range *size_range;
3633
3634 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3635
3636 if (alloc_size >= size_range->end) {
3637 *p_adj_grp_size = size_range->end;
3638 return;
3639 }
3640 }
3641 }
3642
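/* Example on Spectrum-2 (see mlxsw_sp2_adj_grp_size_ranges above): a
 * requested size of 200 is first rounded up to 256, the next supported
 * group size. If the KVDL allocator can only satisfy the request with a
 * larger allocation, the size is then rounded down to the largest
 * supported size that still fits what was actually allocated.
 */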
3643 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3644 u16 *p_adj_grp_size)
3645 {
3646 unsigned int alloc_size;
3647 int err;
3648
3649 /* Round up the requested group size to the next size supported
3650 * by the device and make sure the request can be satisfied.
3651 */
3652 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3653 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3654 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3655 *p_adj_grp_size, &alloc_size);
3656 if (err)
3657 return err;
3658 /* It is possible the allocation results in more allocated
3659 * entries than requested. Try to use as many of them as
3660 * possible.
3661 */
3662 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3663
3664 return 0;
3665 }
3666
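/* Weight handling example for the two functions below: for offloadable
 * nexthops with weights {3, 6}, the gcd is 3, so the normalized weights
 * become {1, 2} and sum_norm_weight is 3. With an ecmp_size of 3, the
 * rebalance assigns DIV_ROUND_CLOSEST()-based shares of 1 and 2 adjacency
 * entries respectively, preserving the 1:2 traffic ratio.
 */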
3667 static void
3668 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3669 {
3670 int i, g = 0, sum_norm_weight = 0;
3671 struct mlxsw_sp_nexthop *nh;
3672
3673 for (i = 0; i < nhgi->count; i++) {
3674 nh = &nhgi->nexthops[i];
3675
3676 if (!nh->should_offload)
3677 continue;
3678 if (g > 0)
3679 g = gcd(nh->nh_weight, g);
3680 else
3681 g = nh->nh_weight;
3682 }
3683
3684 for (i = 0; i < nhgi->count; i++) {
3685 nh = &nhgi->nexthops[i];
3686
3687 if (!nh->should_offload)
3688 continue;
3689 nh->norm_nh_weight = nh->nh_weight / g;
3690 sum_norm_weight += nh->norm_nh_weight;
3691 }
3692
3693 nhgi->sum_norm_weight = sum_norm_weight;
3694 }
3695
3696 static void
3697 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3698 {
3699 int i, weight = 0, lower_bound = 0;
3700 int total = nhgi->sum_norm_weight;
3701 u16 ecmp_size = nhgi->ecmp_size;
3702
3703 for (i = 0; i < nhgi->count; i++) {
3704 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3705 int upper_bound;
3706
3707 if (!nh->should_offload)
3708 continue;
3709 weight += nh->norm_nh_weight;
3710 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3711 nh->num_adj_entries = upper_bound - lower_bound;
3712 lower_bound = upper_bound;
3713 }
3714 }
3715
3716 static struct mlxsw_sp_nexthop *
3717 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3718 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3719
3720 static void
3721 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3722 struct mlxsw_sp_nexthop_group *nh_grp)
3723 {
3724 int i;
3725
3726 for (i = 0; i < nh_grp->nhgi->count; i++) {
3727 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3728
3729 if (nh->offloaded)
3730 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3731 else
3732 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3733 }
3734 }
3735
3736 static void
3737 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3738 struct mlxsw_sp_fib6_entry *fib6_entry)
3739 {
3740 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3741
3742 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3743 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3744 struct mlxsw_sp_nexthop *nh;
3745
3746 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3747 if (nh && nh->offloaded)
3748 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3749 else
3750 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3751 }
3752 }
3753
3754 static void
3755 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3756 struct mlxsw_sp_nexthop_group *nh_grp)
3757 {
3758 struct mlxsw_sp_fib6_entry *fib6_entry;
3759
3760 /* Unfortunately, in IPv6 the route and the nexthop are described by
3761 * the same struct, so we need to iterate over all the routes using the
3762 * nexthop group and set / clear the offload indication for them.
3763 */
3764 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3765 common.nexthop_group_node)
3766 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3767 }
3768
3769 static void
3770 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3771 const struct mlxsw_sp_nexthop *nh,
3772 u16 bucket_index)
3773 {
3774 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3775 bool offload = false, trap = false;
3776
3777 if (nh->offloaded) {
3778 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3779 trap = true;
3780 else
3781 offload = true;
3782 }
3783 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3784 bucket_index, offload, trap);
3785 }
3786
3787 static void
3788 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3789 struct mlxsw_sp_nexthop_group *nh_grp)
3790 {
3791 int i;
3792
3793 /* Do not update the flags if the nexthop group is being destroyed
3794 * since:
3795 * 1. The nexthop object is being deleted, in which case the flags are
3796 * irrelevant.
3797 * 2. The nexthop group was replaced by a newer group, in which case
3798 * the flags of the nexthop object were already updated based on the
3799 * new group.
3800 */
3801 if (nh_grp->can_destroy)
3802 return;
3803
3804 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3805 nh_grp->nhgi->adj_index_valid, false);
3806
3807 /* Update flags of individual nexthop buckets in case of a resilient
3808 * nexthop group.
3809 */
3810 if (!nh_grp->nhgi->is_resilient)
3811 return;
3812
3813 for (i = 0; i < nh_grp->nhgi->count; i++) {
3814 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3815
3816 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3817 }
3818 }
3819
3820 static void
3821 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3822 struct mlxsw_sp_nexthop_group *nh_grp)
3823 {
3824 switch (nh_grp->type) {
3825 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3826 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3827 break;
3828 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3829 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3830 break;
3831 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3832 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3833 break;
3834 }
3835 }
3836
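/* Re-sync a nexthop group with the device. If nothing was added or
 * removed, the existing adjacency entries are rewritten in place.
 * Otherwise the weights are re-normalized, a new KVDL adjacency range is
 * allocated and programmed, and routes are switched over: by updating the
 * FIB entries when there was no valid range before, or by a RALEU mass
 * update when the range was reallocated. On any failure the group falls
 * back to trapping packets to the CPU.
 */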
3837 static int
3838 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3839 struct mlxsw_sp_nexthop_group *nh_grp)
3840 {
3841 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3842 u16 ecmp_size, old_ecmp_size;
3843 struct mlxsw_sp_nexthop *nh;
3844 bool offload_change = false;
3845 u32 adj_index;
3846 bool old_adj_index_valid;
3847 u32 old_adj_index;
3848 int i, err2, err;
3849
3850 if (!nhgi->gateway)
3851 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3852
3853 for (i = 0; i < nhgi->count; i++) {
3854 nh = &nhgi->nexthops[i];
3855
3856 if (nh->should_offload != nh->offloaded) {
3857 offload_change = true;
3858 if (nh->should_offload)
3859 nh->update = 1;
3860 }
3861 }
3862 if (!offload_change) {
3863 /* Nothing was added or removed, so no need to reallocate. Just
3864 * update MAC on existing adjacency indexes.
3865 */
3866 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3867 if (err) {
3868 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3869 goto set_trap;
3870 }
3871 /* Flags of individual nexthop buckets might need to be
3872 * updated.
3873 */
3874 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3875 return 0;
3876 }
3877 mlxsw_sp_nexthop_group_normalize(nhgi);
3878 if (!nhgi->sum_norm_weight) {
3879 /* No neigh of this group is connected, so we just set
3880 * the trap and let everything flow through the kernel.
3881 */
3882 err = 0;
3883 goto set_trap;
3884 }
3885
3886 ecmp_size = nhgi->sum_norm_weight;
3887 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3888 if (err)
3889 /* No valid allocation size available. */
3890 goto set_trap;
3891
3892 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3893 ecmp_size, &adj_index);
3894 if (err) {
3895 /* We ran out of KVD linear space, just set the
3896 * trap and let everything flow through the kernel.
3897 */
3898 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3899 goto set_trap;
3900 }
3901 old_adj_index_valid = nhgi->adj_index_valid;
3902 old_adj_index = nhgi->adj_index;
3903 old_ecmp_size = nhgi->ecmp_size;
3904 nhgi->adj_index_valid = 1;
3905 nhgi->adj_index = adj_index;
3906 nhgi->ecmp_size = ecmp_size;
3907 mlxsw_sp_nexthop_group_rebalance(nhgi);
3908 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3909 if (err) {
3910 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3911 goto set_trap;
3912 }
3913
3914 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3915
3916 if (!old_adj_index_valid) {
3917 /* The trap was set for fib entries, so we have to call
3918 * fib entry update to unset it and use the adjacency index.
3919 */
3920 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3921 if (err) {
3922 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3923 goto set_trap;
3924 }
3925 return 0;
3926 }
3927
3928 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3929 old_adj_index, old_ecmp_size);
3930 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3931 old_ecmp_size, old_adj_index);
3932 if (err) {
3933 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3934 goto set_trap;
3935 }
3936
3937 return 0;
3938
3939 set_trap:
3940 old_adj_index_valid = nhgi->adj_index_valid;
3941 nhgi->adj_index_valid = 0;
3942 for (i = 0; i < nhgi->count; i++) {
3943 nh = &nhgi->nexthops[i];
3944 nh->offloaded = 0;
3945 }
3946 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3947 if (err2)
3948 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3949 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3950 if (old_adj_index_valid)
3951 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3952 nhgi->ecmp_size, nhgi->adj_index);
3953 return err;
3954 }
3955
3956 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3957 bool removing)
3958 {
3959 if (!removing) {
3960 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3961 nh->should_offload = 1;
3962 } else if (nh->nhgi->is_resilient) {
3963 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
3964 nh->should_offload = 1;
3965 } else {
3966 nh->should_offload = 0;
3967 }
3968 nh->update = 1;
3969 }
3970
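/* The kernel may flag a neighbour entry as dead while nexthops still use
 * it. Look up (or create) a live neighbour for the same address, re-key
 * the driver's neigh entry to it, and transfer the reference held by each
 * nexthop from the old neighbour to the new one.
 */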
3971 static int
3972 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3973 struct mlxsw_sp_neigh_entry *neigh_entry)
3974 {
3975 struct neighbour *n, *old_n = neigh_entry->key.n;
3976 struct mlxsw_sp_nexthop *nh;
3977 bool entry_connected;
3978 u8 nud_state, dead;
3979 int err;
3980
3981 nh = list_first_entry(&neigh_entry->nexthop_list,
3982 struct mlxsw_sp_nexthop, neigh_list_node);
3983
3984 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3985 if (!n) {
3986 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3987 if (IS_ERR(n))
3988 return PTR_ERR(n);
3989 neigh_event_send(n, NULL);
3990 }
3991
3992 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3993 neigh_entry->key.n = n;
3994 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3995 if (err)
3996 goto err_neigh_entry_insert;
3997
3998 read_lock_bh(&n->lock);
3999 nud_state = n->nud_state;
4000 dead = n->dead;
4001 read_unlock_bh(&n->lock);
4002 entry_connected = nud_state & NUD_VALID && !dead;
4003
4004 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4005 neigh_list_node) {
4006 neigh_release(old_n);
4007 neigh_clone(n);
4008 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4009 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4010 }
4011
4012 neigh_release(n);
4013
4014 return 0;
4015
4016 err_neigh_entry_insert:
4017 neigh_entry->key.n = old_n;
4018 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4019 neigh_release(n);
4020 return err;
4021 }
4022
4023 static void
4024 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4025 struct mlxsw_sp_neigh_entry *neigh_entry,
4026 bool removing, bool dead)
4027 {
4028 struct mlxsw_sp_nexthop *nh;
4029
4030 if (list_empty(&neigh_entry->nexthop_list))
4031 return;
4032
4033 if (dead) {
4034 int err;
4035
4036 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4037 neigh_entry);
4038 if (err)
4039 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4040 return;
4041 }
4042
4043 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4044 neigh_list_node) {
4045 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4046 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4047 }
4048 }
4049
4050 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4051 struct mlxsw_sp_rif *rif)
4052 {
4053 if (nh->rif)
4054 return;
4055
4056 nh->rif = rif;
4057 list_add(&nh->rif_list_node, &rif->nexthop_list);
4058 }
4059
4060 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4061 {
4062 if (!nh->rif)
4063 return;
4064
4065 list_del(&nh->rif_list_node);
4066 nh->rif = NULL;
4067 }
4068
4069 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4070 struct mlxsw_sp_nexthop *nh)
4071 {
4072 struct mlxsw_sp_neigh_entry *neigh_entry;
4073 struct neighbour *n;
4074 u8 nud_state, dead;
4075 int err;
4076
4077 if (!nh->nhgi->gateway || nh->neigh_entry)
4078 return 0;
4079
4080 /* Take a reference on the neighbour here to ensure it is not
4081 * destroyed before the nexthop entry is finished with it.
4082 * The reference is taken either in neigh_lookup() or
4083 * in neigh_create() in case n is not found.
4084 */
4085 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4086 if (!n) {
4087 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4088 if (IS_ERR(n))
4089 return PTR_ERR(n);
4090 neigh_event_send(n, NULL);
4091 }
4092 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4093 if (!neigh_entry) {
4094 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4095 if (IS_ERR(neigh_entry)) {
4096 err = -EINVAL;
4097 goto err_neigh_entry_create;
4098 }
4099 }
4100
4101 /* If that is the first nexthop connected to that neigh, add to
4102 * nexthop_neighs_list
4103 */
4104 if (list_empty(&neigh_entry->nexthop_list))
4105 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4106 &mlxsw_sp->router->nexthop_neighs_list);
4107
4108 nh->neigh_entry = neigh_entry;
4109 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4110 read_lock_bh(&n->lock);
4111 nud_state = n->nud_state;
4112 dead = n->dead;
4113 read_unlock_bh(&n->lock);
4114 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4115
4116 return 0;
4117
4118 err_neigh_entry_create:
4119 neigh_release(n);
4120 return err;
4121 }
4122
4123 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4124 struct mlxsw_sp_nexthop *nh)
4125 {
4126 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4127 struct neighbour *n;
4128
4129 if (!neigh_entry)
4130 return;
4131 n = neigh_entry->key.n;
4132
4133 __mlxsw_sp_nexthop_neigh_update(nh, true);
4134 list_del(&nh->neigh_list_node);
4135 nh->neigh_entry = NULL;
4136
4137 /* If that is the last nexthop connected to that neigh, remove from
4138 * nexthop_neighs_list
4139 */
4140 if (list_empty(&neigh_entry->nexthop_list))
4141 list_del(&neigh_entry->nexthop_neighs_list_node);
4142
4143 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4144 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4145
4146 neigh_release(n);
4147 }
4148
4149 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4150 {
4151 struct net_device *ul_dev;
4152 bool is_up;
4153
4154 rcu_read_lock();
4155 ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4156 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4157 rcu_read_unlock();
4158
4159 return is_up;
4160 }
4161
4162 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4163 struct mlxsw_sp_nexthop *nh,
4164 struct mlxsw_sp_ipip_entry *ipip_entry)
4165 {
4166 bool removing;
4167
4168 if (!nh->nhgi->gateway || nh->ipip_entry)
4169 return;
4170
4171 nh->ipip_entry = ipip_entry;
4172 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4173 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4174 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4175 }
4176
4177 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4178 struct mlxsw_sp_nexthop *nh)
4179 {
4180 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4181
4182 if (!ipip_entry)
4183 return;
4184
4185 __mlxsw_sp_nexthop_neigh_update(nh, true);
4186 nh->ipip_entry = NULL;
4187 }
4188
4189 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4190 const struct fib_nh *fib_nh,
4191 enum mlxsw_sp_ipip_type *p_ipipt)
4192 {
4193 struct net_device *dev = fib_nh->fib_nh_dev;
4194
4195 return dev &&
4196 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4197 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4198 }
4199
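/* Decide how the nexthop is to be programmed: via an IPIP loopback RIF if
 * the egress device is an offloadable tunnel, or as a regular Ethernet
 * nexthop bound to the device's RIF and a resolved neighbour otherwise.
 */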
4200 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4201 struct mlxsw_sp_nexthop *nh,
4202 const struct net_device *dev)
4203 {
4204 const struct mlxsw_sp_ipip_ops *ipip_ops;
4205 struct mlxsw_sp_ipip_entry *ipip_entry;
4206 struct mlxsw_sp_rif *rif;
4207 int err;
4208
4209 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4210 if (ipip_entry) {
4211 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4212 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4213 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4214 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4215 return 0;
4216 }
4217 }
4218
4219 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4220 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4221 if (!rif)
4222 return 0;
4223
4224 mlxsw_sp_nexthop_rif_init(nh, rif);
4225 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4226 if (err)
4227 goto err_neigh_init;
4228
4229 return 0;
4230
4231 err_neigh_init:
4232 mlxsw_sp_nexthop_rif_fini(nh);
4233 return err;
4234 }
4235
4236 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4237 struct mlxsw_sp_nexthop *nh)
4238 {
4239 switch (nh->type) {
4240 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4241 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4242 mlxsw_sp_nexthop_rif_fini(nh);
4243 break;
4244 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4245 mlxsw_sp_nexthop_rif_fini(nh);
4246 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4247 break;
4248 }
4249 }
4250
4251 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4252 struct mlxsw_sp_nexthop_group *nh_grp,
4253 struct mlxsw_sp_nexthop *nh,
4254 struct fib_nh *fib_nh)
4255 {
4256 struct net_device *dev = fib_nh->fib_nh_dev;
4257 struct in_device *in_dev;
4258 int err;
4259
4260 nh->nhgi = nh_grp->nhgi;
4261 nh->key.fib_nh = fib_nh;
4262 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4263 nh->nh_weight = fib_nh->fib_nh_weight;
4264 #else
4265 nh->nh_weight = 1;
4266 #endif
4267 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4268 nh->neigh_tbl = &arp_tbl;
4269 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4270 if (err)
4271 return err;
4272
4273 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4274 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4275
4276 if (!dev)
4277 return 0;
4278 nh->ifindex = dev->ifindex;
4279
4280 rcu_read_lock();
4281 in_dev = __in_dev_get_rcu(dev);
4282 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4283 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4284 rcu_read_unlock();
4285 return 0;
4286 }
4287 rcu_read_unlock();
4288
4289 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4290 if (err)
4291 goto err_nexthop_neigh_init;
4292
4293 return 0;
4294
4295 err_nexthop_neigh_init:
4296 list_del(&nh->router_list_node);
4297 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4298 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4299 return err;
4300 }
4301
4302 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4303 struct mlxsw_sp_nexthop *nh)
4304 {
4305 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4306 list_del(&nh->router_list_node);
4307 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4308 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4309 }
4310
4311 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4312 unsigned long event, struct fib_nh *fib_nh)
4313 {
4314 struct mlxsw_sp_nexthop_key key;
4315 struct mlxsw_sp_nexthop *nh;
4316
4317 key.fib_nh = fib_nh;
4318 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4319 if (!nh)
4320 return;
4321
4322 switch (event) {
4323 case FIB_EVENT_NH_ADD:
4324 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4325 break;
4326 case FIB_EVENT_NH_DEL:
4327 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4328 break;
4329 }
4330
4331 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4332 }
4333
4334 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4335 struct mlxsw_sp_rif *rif)
4336 {
4337 struct mlxsw_sp_nexthop *nh;
4338 bool removing;
4339
4340 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4341 switch (nh->type) {
4342 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4343 removing = false;
4344 break;
4345 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4346 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4347 break;
4348 default:
4349 WARN_ON(1);
4350 continue;
4351 }
4352
4353 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4354 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4355 }
4356 }
4357
4358 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4359 struct mlxsw_sp_rif *old_rif,
4360 struct mlxsw_sp_rif *new_rif)
4361 {
4362 struct mlxsw_sp_nexthop *nh;
4363
4364 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4365 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4366 nh->rif = new_rif;
4367 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4368 }
4369
4370 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4371 struct mlxsw_sp_rif *rif)
4372 {
4373 struct mlxsw_sp_nexthop *nh, *tmp;
4374
4375 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4376 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4377 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4378 }
4379 }
4380
4381 static void
4382 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4383 const struct mlxsw_sp_nexthop_group *nh_grp,
4384 unsigned long *activity)
4385 {
4386 char *ratrad_pl;
4387 int i, err;
4388
4389 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4390 if (!ratrad_pl)
4391 return;
4392
4393 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4394 nh_grp->nhgi->count);
4395 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4396 if (err)
4397 goto out;
4398
4399 for (i = 0; i < nh_grp->nhgi->count; i++) {
4400 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4401 continue;
4402 bitmap_set(activity, i, 1);
4403 }
4404
4405 out:
4406 kfree(ratrad_pl);
4407 }
4408
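/* Resilient groups require periodic reporting of nexthop bucket activity
 * to the kernel. The delayed work below queries the RATRAD register for
 * each group on nh_res_grp_list (which also clears the activity bits) and
 * feeds the resulting bitmap to nexthop_res_grp_activity_update(),
 * rescheduling itself as long as at least one resilient group exists.
 */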
4409 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4410
4411 static void
4412 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4413 const struct mlxsw_sp_nexthop_group *nh_grp)
4414 {
4415 unsigned long *activity;
4416
4417 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4418 if (!activity)
4419 return;
4420
4421 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4422 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4423 nh_grp->nhgi->count, activity);
4424
4425 bitmap_free(activity);
4426 }
4427
4428 static void
4429 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4430 {
4431 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4432
4433 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4434 msecs_to_jiffies(interval));
4435 }
4436
4437 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4438 {
4439 struct mlxsw_sp_nexthop_group_info *nhgi;
4440 struct mlxsw_sp_router *router;
4441 bool reschedule = false;
4442
4443 router = container_of(work, struct mlxsw_sp_router,
4444 nh_grp_activity_dw.work);
4445
4446 mutex_lock(&router->lock);
4447
4448 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4449 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4450 reschedule = true;
4451 }
4452
4453 mutex_unlock(&router->lock);
4454
4455 if (!reschedule)
4456 return;
4457 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4458 }
4459
4460 static int
4461 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4462 const struct nh_notifier_single_info *nh,
4463 struct netlink_ext_ack *extack)
4464 {
4465 int err = -EINVAL;
4466
4467 if (nh->is_fdb)
4468 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4469 else if (nh->has_encap)
4470 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4471 else
4472 err = 0;
4473
4474 return err;
4475 }
4476
4477 static int
4478 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4479 const struct nh_notifier_single_info *nh,
4480 struct netlink_ext_ack *extack)
4481 {
4482 int err;
4483
4484 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4485 if (err)
4486 return err;
4487
4488 /* Device-only nexthops with an IPIP device are programmed as
4489 * encapsulating adjacency entries.
4490 */
4491 if (!nh->gw_family && !nh->is_reject &&
4492 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4493 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4494 return -EINVAL;
4495 }
4496
4497 return 0;
4498 }
4499
4500 static int
4501 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4502 const struct nh_notifier_grp_info *nh_grp,
4503 struct netlink_ext_ack *extack)
4504 {
4505 int i;
4506
4507 if (nh_grp->is_fdb) {
4508 NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4509 return -EINVAL;
4510 }
4511
4512 for (i = 0; i < nh_grp->num_nh; i++) {
4513 const struct nh_notifier_single_info *nh;
4514 int err;
4515
4516 nh = &nh_grp->nh_entries[i].nh;
4517 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4518 extack);
4519 if (err)
4520 return err;
4521 }
4522
4523 return 0;
4524 }
4525
4526 static int
4527 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4528 const struct nh_notifier_res_table_info *nh_res_table,
4529 struct netlink_ext_ack *extack)
4530 {
4531 unsigned int alloc_size;
4532 bool valid_size = false;
4533 int err, i;
4534
4535 if (nh_res_table->num_nh_buckets < 32) {
4536 NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4537 return -EINVAL;
4538 }
4539
4540 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4541 const struct mlxsw_sp_adj_grp_size_range *size_range;
4542
4543 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4544
4545 if (nh_res_table->num_nh_buckets >= size_range->start &&
4546 nh_res_table->num_nh_buckets <= size_range->end) {
4547 valid_size = true;
4548 break;
4549 }
4550 }
4551
4552 if (!valid_size) {
4553 NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4554 return -EINVAL;
4555 }
4556
4557 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4558 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4559 nh_res_table->num_nh_buckets,
4560 &alloc_size);
4561 if (err || nh_res_table->num_nh_buckets != alloc_size) {
4562 NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4563 return -EINVAL;
4564 }
4565
4566 return 0;
4567 }
4568
4569 static int
4570 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4571 const struct nh_notifier_res_table_info *nh_res_table,
4572 struct netlink_ext_ack *extack)
4573 {
4574 int err;
4575 u16 i;
4576
4577 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4578 nh_res_table,
4579 extack);
4580 if (err)
4581 return err;
4582
4583 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4584 const struct nh_notifier_single_info *nh;
4585 int err;
4586
4587 nh = &nh_res_table->nhs[i];
4588 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4589 extack);
4590 if (err)
4591 return err;
4592 }
4593
4594 return 0;
4595 }
4596
4597 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4598 unsigned long event,
4599 struct nh_notifier_info *info)
4600 {
4601 struct nh_notifier_single_info *nh;
4602
4603 if (event != NEXTHOP_EVENT_REPLACE &&
4604 event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4605 event != NEXTHOP_EVENT_BUCKET_REPLACE)
4606 return 0;
4607
4608 switch (info->type) {
4609 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4610 return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4611 info->extack);
4612 case NH_NOTIFIER_INFO_TYPE_GRP:
4613 return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4614 info->nh_grp,
4615 info->extack);
4616 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4617 return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4618 info->nh_res_table,
4619 info->extack);
4620 case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4621 nh = &info->nh_res_bucket->new_nh;
4622 return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4623 info->extack);
4624 default:
4625 NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4626 return -EOPNOTSUPP;
4627 }
4628 }
4629
4630 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4631 const struct nh_notifier_info *info)
4632 {
4633 const struct net_device *dev;
4634
4635 switch (info->type) {
4636 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4637 dev = info->nh->dev;
4638 return info->nh->gw_family || info->nh->is_reject ||
4639 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4640 case NH_NOTIFIER_INFO_TYPE_GRP:
4641 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4642 /* Already validated earlier. */
4643 return true;
4644 default:
4645 return false;
4646 }
4647 }
4648
4649 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4650 struct mlxsw_sp_nexthop *nh)
4651 {
4652 u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4653
4654 nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4655 nh->should_offload = 1;
4656 /* While nexthops that discard packets do not forward packets
4657 * via an egress RIF, they still need to be programmed using a
4658 * valid RIF, so use the loopback RIF created during init.
4659 */
4660 nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4661 }
4662
4663 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4664 struct mlxsw_sp_nexthop *nh)
4665 {
4666 nh->rif = NULL;
4667 nh->should_offload = 0;
4668 }
4669
4670 static int
4671 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4672 struct mlxsw_sp_nexthop_group *nh_grp,
4673 struct mlxsw_sp_nexthop *nh,
4674 struct nh_notifier_single_info *nh_obj, int weight)
4675 {
4676 struct net_device *dev = nh_obj->dev;
4677 int err;
4678
4679 nh->nhgi = nh_grp->nhgi;
4680 nh->nh_weight = weight;
4681
4682 switch (nh_obj->gw_family) {
4683 case AF_INET:
4684 memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4685 nh->neigh_tbl = &arp_tbl;
4686 break;
4687 case AF_INET6:
4688 memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4689 #if IS_ENABLED(CONFIG_IPV6)
4690 nh->neigh_tbl = &nd_tbl;
4691 #endif
4692 break;
4693 }
4694
4695 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4696 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4697 nh->ifindex = dev->ifindex;
4698
4699 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4700 if (err)
4701 goto err_type_init;
4702
4703 if (nh_obj->is_reject)
4704 mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4705
4706 /* In a resilient nexthop group, all the nexthops must be written to
4707 * the adjacency table, even if they do not have a valid neighbour or
4708 * RIF.
4709 */
4710 if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4711 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4712 nh->should_offload = 1;
4713 }
4714
4715 return 0;
4716
4717 err_type_init:
4718 list_del(&nh->router_list_node);
4719 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4720 return err;
4721 }
4722
4723 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4724 struct mlxsw_sp_nexthop *nh)
4725 {
4726 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4727 mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4728 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4729 list_del(&nh->router_list_node);
4730 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4731 nh->should_offload = 0;
4732 }
4733
4734 static int
4735 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4736 struct mlxsw_sp_nexthop_group *nh_grp,
4737 struct nh_notifier_info *info)
4738 {
4739 struct mlxsw_sp_nexthop_group_info *nhgi;
4740 struct mlxsw_sp_nexthop *nh;
4741 bool is_resilient = false;
4742 unsigned int nhs;
4743 int err, i;
4744
4745 switch (info->type) {
4746 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4747 nhs = 1;
4748 break;
4749 case NH_NOTIFIER_INFO_TYPE_GRP:
4750 nhs = info->nh_grp->num_nh;
4751 break;
4752 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4753 nhs = info->nh_res_table->num_nh_buckets;
4754 is_resilient = true;
4755 break;
4756 default:
4757 return -EINVAL;
4758 }
4759
4760 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4761 if (!nhgi)
4762 return -ENOMEM;
4763 nh_grp->nhgi = nhgi;
4764 nhgi->nh_grp = nh_grp;
4765 nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4766 nhgi->is_resilient = is_resilient;
4767 nhgi->count = nhs;
4768 for (i = 0; i < nhgi->count; i++) {
4769 struct nh_notifier_single_info *nh_obj;
4770 int weight;
4771
4772 nh = &nhgi->nexthops[i];
4773 switch (info->type) {
4774 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4775 nh_obj = info->nh;
4776 weight = 1;
4777 break;
4778 case NH_NOTIFIER_INFO_TYPE_GRP:
4779 nh_obj = &info->nh_grp->nh_entries[i].nh;
4780 weight = info->nh_grp->nh_entries[i].weight;
4781 break;
4782 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4783 nh_obj = &info->nh_res_table->nhs[i];
4784 weight = 1;
4785 break;
4786 default:
4787 err = -EINVAL;
4788 goto err_nexthop_obj_init;
4789 }
4790 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4791 weight);
4792 if (err)
4793 goto err_nexthop_obj_init;
4794 }
4795 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4796 if (err) {
4797 NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4798 goto err_group_refresh;
4799 }
4800
4801 /* Add resilient nexthop groups to a list so that the activity of their
4802 * nexthop buckets will be periodically queried and cleared.
4803 */
4804 if (nhgi->is_resilient) {
4805 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4806 mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4807 list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4808 }
4809
4810 return 0;
4811
4812 err_group_refresh:
4813 i = nhgi->count;
4814 err_nexthop_obj_init:
4815 for (i--; i >= 0; i--) {
4816 nh = &nhgi->nexthops[i];
4817 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4818 }
4819 kfree(nhgi);
4820 return err;
4821 }
4822
4823 static void
4824 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4825 struct mlxsw_sp_nexthop_group *nh_grp)
4826 {
4827 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4828 struct mlxsw_sp_router *router = mlxsw_sp->router;
4829 int i;
4830
4831 if (nhgi->is_resilient) {
4832 list_del(&nhgi->list);
4833 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4834 cancel_delayed_work(&router->nh_grp_activity_dw);
4835 }
4836
4837 for (i = nhgi->count - 1; i >= 0; i--) {
4838 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4839
4840 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4841 }
4842 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4843 WARN_ON_ONCE(nhgi->adj_index_valid);
4844 kfree(nhgi);
4845 }
4846
4847 static struct mlxsw_sp_nexthop_group *
4848 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
4849 struct nh_notifier_info *info)
4850 {
4851 struct mlxsw_sp_nexthop_group *nh_grp;
4852 int err;
4853
4854 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4855 if (!nh_grp)
4856 return ERR_PTR(-ENOMEM);
4857 INIT_LIST_HEAD(&nh_grp->vr_list);
4858 err = rhashtable_init(&nh_grp->vr_ht,
4859 &mlxsw_sp_nexthop_group_vr_ht_params);
4860 if (err)
4861 goto err_nexthop_group_vr_ht_init;
4862 INIT_LIST_HEAD(&nh_grp->fib_list);
4863 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4864 nh_grp->obj.id = info->id;
4865
4866 err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
4867 if (err)
4868 goto err_nexthop_group_info_init;
4869
4870 nh_grp->can_destroy = false;
4871
4872 return nh_grp;
4873
4874 err_nexthop_group_info_init:
4875 rhashtable_destroy(&nh_grp->vr_ht);
4876 err_nexthop_group_vr_ht_init:
4877 kfree(nh_grp);
4878 return ERR_PTR(err);
4879 }
4880
4881 static void
4882 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
4883 struct mlxsw_sp_nexthop_group *nh_grp)
4884 {
4885 if (!nh_grp->can_destroy)
4886 return;
4887 mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
4888 WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
4889 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
4890 rhashtable_destroy(&nh_grp->vr_ht);
4891 kfree(nh_grp);
4892 }
4893
4894 static struct mlxsw_sp_nexthop_group *
4895 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
4896 {
4897 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
4898
4899 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4900 cmp_arg.id = id;
4901 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
4902 &cmp_arg,
4903 mlxsw_sp_nexthop_group_ht_params);
4904 }
4905
4906 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
4907 struct mlxsw_sp_nexthop_group *nh_grp)
4908 {
4909 return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4910 }
4911
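/* Replace is implemented by swapping the group info structures between
 * the newly created shell group and the existing one, so that FIB entries
 * keep pointing at the same nexthop group object while the device is
 * moved over to the new adjacency entries. The shell, now holding the old
 * info, is then destroyed.
 */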
4912 static int
4913 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
4914 struct mlxsw_sp_nexthop_group *nh_grp,
4915 struct mlxsw_sp_nexthop_group *old_nh_grp,
4916 struct netlink_ext_ack *extack)
4917 {
4918 struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
4919 struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
4920 int err;
4921
4922 old_nh_grp->nhgi = new_nhgi;
4923 new_nhgi->nh_grp = old_nh_grp;
4924 nh_grp->nhgi = old_nhgi;
4925 old_nhgi->nh_grp = nh_grp;
4926
4927 if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
4928 /* Both the old adjacency index and the new one are valid.
4929 * Routes are currently using the old one. Tell the device to
4930 * replace the old adjacency index with the new one.
4931 */
4932 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
4933 old_nhgi->adj_index,
4934 old_nhgi->ecmp_size);
4935 if (err) {
4936 NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
4937 goto err_out;
4938 }
4939 } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
4940 /* The old adjacency index is valid, while the new one is not.
4941 * Iterate over all the routes using the group and change them
4942 * to trap packets to the CPU.
4943 */
4944 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
4945 if (err) {
4946 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
4947 goto err_out;
4948 }
4949 } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
4950 /* The old adjacency index is invalid, while the new one is.
4951 * Iterate over all the routes using the group and change them
4952 * to forward packets using the new valid index.
4953 */
4954 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
4955 if (err) {
4956 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
4957 goto err_out;
4958 }
4959 }
4960
4961 /* Make sure the flags are set / cleared based on the new nexthop group
4962 * information.
4963 */
4964 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
4965
4966 /* At this point 'nh_grp' is just a shell that is not used by anyone
4967 * and its nexthop group info is the old info that was just replaced
4968 * with the new one. Remove it.
4969 */
4970 nh_grp->can_destroy = true;
4971 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
4972
4973 return 0;
4974
4975 err_out:
4976 old_nhgi->nh_grp = old_nh_grp;
4977 nh_grp->nhgi = new_nhgi;
4978 new_nhgi->nh_grp = nh_grp;
4979 old_nh_grp->nhgi = old_nhgi;
4980 return err;
4981 }
4982
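/* A NEXTHOP_EVENT_REPLACE notification covers both the creation of a new
 * nexthop object and the replacement of an existing one: a new group is
 * always created first and is then either inserted into the hash table or
 * used to take over the info of the group already keyed by this object ID.
 */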
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
				    struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
	struct netlink_ext_ack *extack = info->extack;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!old_nh_grp)
		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
	else
		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
							 old_nh_grp, extack);

	if (err) {
		nh_grp->can_destroy = true;
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
	}

	return err;
}

static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;

	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);

	/* If the group still has routes using it, then defer the delete
	 * operation until the last route using it is deleted.
	 */
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}

static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
					     u32 adj_index, char *ratr_pl)
{
	MLXSW_REG_ZERO(ratr, ratr_pl);
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);

	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
{
	/* Clear the opcode and activity on both the old and new payload as
	 * they are irrelevant for the comparison.
	 */
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl, 0);
	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);

	/* If the contents of the adjacency entry are consistent with the
	 * replacement request, then replacement was successful.
	 */
	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
		return 0;

	return -EINVAL;
}

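/* Update the adjacency entry backing a single bucket of a resilient nexthop
 * group. Unless a forced replacement was requested, the entry is written
 * with an activity check and then read back and compared with the requested
 * contents, to verify that an active bucket was not overwritten. A forced
 * write is used anyway when the bucket's idle timer is shorter than the
 * interval at which activity is queried and cleared, since the activity
 * information would be too stale to be meaningful.
 */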
static int
mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	bool force = info->nh_res_bucket->force;
	char ratr_pl_new[MLXSW_REG_RATR_LEN];
	char ratr_pl[MLXSW_REG_RATR_LEN];
	u32 adj_index;
	int err;

	/* No point in trying an atomic replacement if the idle timer interval
	 * is smaller than the interval in which we query and clear activity.
	 */
	if (!force && info->nh_res_bucket->idle_timer_ms <
	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
		force = true;

	adj_index = nh->nhgi->adj_index + bucket_index;
	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
		return err;
	}

	if (!force) {
		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
							ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
			return err;
		}

		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
			return err;
		}
	}

	nh->update = 0;
	nh->offloaded = 1;
	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);

	return 0;
}

static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
					       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct nh_notifier_single_info *nh_obj;
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
		return -EINVAL;
	}

	nhgi = nh_grp->nhgi;

	if (bucket_index >= nhgi->count) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
		return -EINVAL;
	}

	nh = &nhgi->nexthops[bucket_index];
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);

	nh_obj = &info->nh_res_bucket->new_nh;
	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
		goto err_nexthop_obj_init;
	}

	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
	if (err)
		goto err_nexthop_obj_bucket_adj_update;

	return 0;

err_nexthop_obj_bucket_adj_update:
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
err_nexthop_obj_init:
	nh_obj = &info->nh_res_bucket->old_nh;
	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	/* The old adjacency entry was not overwritten */
	nh->update = 0;
	nh->offloaded = 1;
	return err;
}

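/* Notifier block for nexthop object events. Events are validated first and
 * then handled under the router lock, which serializes them against the
 * route notifications that reference the resulting nexthop groups.
 */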
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
	if (err)
		goto out;

	mutex_lock(&router->lock);

	switch (event) {
	case NEXTHOP_EVENT_REPLACE:
		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_DEL:
		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_BUCKET_REPLACE:
		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
							  info);
		break;
	default:
		break;
	}

	mutex_unlock(&router->lock);

out:
	return notifier_from_errno(err);
}

static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				   struct fib_info *fi)
{
	const struct fib_nh *nh = fib_info_nh(fi, 0);

	return nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}

static int
mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct fib_nh *fib_nh;

		nh = &nhgi->nexthops[i];
		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	i = nhgi->count;
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
	nh_grp->ipv4.fi = fi;
	fib_info_hold(fi);

	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	fib_info_put(fi);
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
	fib_info_put(nh_grp->ipv4.fi);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (fi->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   fi->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
out:
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}

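/* IPv4 routes with a non-zero TOS are not offloaded, presumably because the
 * device does not consider the TOS during lookup; such entries are written
 * to trap packets to the CPU instead.
 */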
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->nhgi->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nhgi->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}

static void
mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib_entry_notifier_info *fen_info)
{
	u32 *p_dst = (u32 *) &fen_info->dst;
	struct fib_rt_info fri;

	fri.fi = fen_info->fi;
	fri.tb_id = fen_info->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = fen_info->dst_len;
	fri.tos = fen_info->tos;
	fri.type = fen_info->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = true;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.tos = fib4_entry->tos;
	fri.type = fib4_entry->type;
	fri.offload = should_offload;
	fri.trap = !should_offload;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.tos = fib4_entry->tos;
	fri.type = fib4_entry->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
	int i;

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	for (i = 0; i < nrt6; i++)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
				       false, false, true);
}
#else
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       should_offload, !should_offload, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       false, false, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif

static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_sp_fib_entry_op op)
{
	switch (op) {
	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

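/* "Basic" flavour of the low-level router ops: each FIB entry operation is
 * packed into a single RALUE register payload and committed immediately, so
 * the operation context only needs room for that one payload and commits
 * are never postponed for bulking.
 */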
struct mlxsw_sp_fib_entry_op_ctx_basic {
	char ralue_pl[MLXSW_REG_RALUE_LEN];
};

static void
mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					enum mlxsw_sp_l3proto proto,
					enum mlxsw_sp_fib_entry_op op,
					u16 virtual_router, u8 prefix_len,
					unsigned char *addr,
					struct mlxsw_sp_fib_entry_priv *priv)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
	enum mlxsw_reg_ralxx_protocol ralxx_proto;
	char *ralue_pl = op_ctx_basic->ralue_pl;
	enum mlxsw_reg_ralue_op ralue_op;

	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;

	switch (op) {
	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
		break;
	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
				      virtual_router, prefix_len, (u32 *) addr);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
				      virtual_router, prefix_len, addr);
		break;
	}
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						   enum mlxsw_reg_ralue_trap_action trap_action,
						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
					trap_id, adjacency_index, ecmp_size);
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						  enum mlxsw_reg_ralue_trap_action trap_action,
						  u16 trap_id, u16 local_erif)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
				       trap_id, local_erif);
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						      u32 tunnel_ptr)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
}

static int
mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					  bool *postponed_for_bulk)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
			       op_ctx_basic->ralue_pl);
}

static bool
mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
{
	return true;
}

static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_sp_fib_entry_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;

	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
				    fib_entry->fib_node->key.prefix_len,
				    fib_entry->fib_node->key.addr,
				    fib_entry->priv);
}

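/* Commit a packed FIB entry operation. Low-level implementations that
 * support bulking may set 'postponed_for_bulk' instead of writing to the
 * device immediately, in which case the private references held on the
 * context are kept until the bulk is actually flushed.
 */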
static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				     const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	bool postponed_for_bulk = false;
	int err;

	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
	if (!postponed_for_bulk)
		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
	return err;
}

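/* Lazily allocate and program a single adjacency entry whose action is to
 * trap packets to the CPU. Remote routes whose nexthop group has a RIF but
 * no valid adjacency index are pointed at this entry rather than being
 * written as plain trap entries. The index is allocated once and reused
 * for subsequent writes.
 */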
static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ratr_trap_action trap_action;
	char ratr_pl[MLXSW_REG_RATR_LEN];
	int err;

	if (mlxsw_sp->router->adj_discard_index_valid)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &mlxsw_sp->router->adj_discard_index);
	if (err)
		return err;

	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
			    MLXSW_REG_RATR_TYPE_ETHERNET,
			    mlxsw_sp->router->adj_discard_index,
			    mlxsw_sp->router->lb_rif_index);
	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
	if (err)
		goto err_ratr_write;

	mlxsw_sp->router->adj_discard_index_valid = true;

	return 0;

err_ratr_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_discard_index);
	return err;
}

static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;
	int err;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = nhgi->adj_index;
		ecmp_size = nhgi->ecmp_size;
	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
		err = mlxsw_sp_adj_discard_write(mlxsw_sp);
		if (err)
			return err;
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_discard_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
					  adjacency_index, ecmp_size);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	enum mlxsw_reg_ralue_trap_action trap_action;

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id;

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	int err;

	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
				     fib_entry->decap.tunnel_index);
	if (err)
		return err;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
					     fib_entry->decap.tunnel_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
					     fib_entry->decap.tunnel_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_sp_fib_entry_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_sp_fib_entry_op op)
{
	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);

	if (err)
		return err;

	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);

	return err;
}

static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       bool is_new)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;

	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;

	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
		return 0;
	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
}

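/* Derive the driver's FIB entry type from the kernel route type. RTN_LOCAL
 * is special-cased: if the address is the decap address of an IP-in-IP or
 * NVE tunnel, the entry is programmed to decapsulate rather than merely
 * trap to the CPU.
 */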
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
							       MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
						 MLXSW_SP_L3_PROTO_IPV4,
						 &dip)) {
			u32 tunnel_index;

			tunnel_index = router->nve_decap_config.tunnel_index;
			fib_entry->decap.tunnel_index = tunnel_index;
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
			return 0;
		}
		fallthrough;
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_BLACKHOLE:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
		return 0;
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
		return 0;
	case RTN_UNICAST:
		if (nhgi->gateway)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}

static void
mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	fib4_entry->fi = fen_info->fi;
	fib_info_hold(fib4_entry->fi);
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_fib4_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib4_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;

	fib_info_put(fib4_entry->fi);
	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
	kfree(fib4_entry);
}

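/* Look up the driver's FIB entry corresponding to a kernel notification.
 * Only a single entry is stored per FIB node, so a match is only returned
 * if all the identifying fields of the notification agree with it.
 */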
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == fen_info->tb_id &&
	    fib4_entry->tos == fen_info->tos &&
	    fib4_entry->type == fen_info->type &&
	    fib4_entry->fi == fen_info->fi)
		return fib4_entry;

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	kfree(fib_node);
}

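/* Account the node's prefix length in the LPM tree used by the FIB. The
 * first route with a given prefix length may require a tree that also
 * covers that length, in which case a suitable (possibly shared) tree is
 * fetched and the virtual routers are rebound to it.
 */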
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}

static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}

static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (fib_node->fib_entry)
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	bool is_new = !fib_node->fib_entry;
	int err;

	fib_node->fib_entry = fib_entry;

	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
	if (err)
		goto err_fib_entry_update;

	return 0;

err_fib_entry_update:
	fib_node->fib_entry = NULL;
	return err;
}

static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					    struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
	fib_node->fib_entry = NULL;
	return err;
}

static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;

	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}

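/* The local and main routing tables are effectively squashed into a single
 * virtual router (see mlxsw_sp_fix_tb_id()), so an entry from the main
 * table is not allowed to replace an identical entry from the local table,
 * which the kernel consults first.
 */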
mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry * fib4_entry)6396 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6397 {
6398 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6399 struct mlxsw_sp_fib4_entry *fib4_replaced;
6400
6401 if (!fib_node->fib_entry)
6402 return true;
6403
6404 fib4_replaced = container_of(fib_node->fib_entry,
6405 struct mlxsw_sp_fib4_entry, common);
6406 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6407 fib4_replaced->tb_id == RT_TABLE_LOCAL)
6408 return false;
6409
6410 return true;
6411 }
6412
6413 static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_entry_op_ctx * op_ctx,const struct fib_entry_notifier_info * fen_info)6414 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6415 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6416 const struct fib_entry_notifier_info *fen_info)
6417 {
6418 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6419 struct mlxsw_sp_fib_entry *replaced;
6420 struct mlxsw_sp_fib_node *fib_node;
6421 int err;
6422
6423 if (fen_info->fi->nh &&
6424 !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6425 return 0;
6426
6427 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6428 &fen_info->dst, sizeof(fen_info->dst),
6429 fen_info->dst_len,
6430 MLXSW_SP_L3_PROTO_IPV4);
6431 if (IS_ERR(fib_node)) {
6432 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6433 return PTR_ERR(fib_node);
6434 }
6435
6436 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6437 if (IS_ERR(fib4_entry)) {
6438 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6439 err = PTR_ERR(fib4_entry);
6440 goto err_fib4_entry_create;
6441 }
6442
6443 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6444 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6445 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6446 return 0;
6447 }
6448
6449 replaced = fib_node->fib_entry;
6450 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
6451 if (err) {
6452 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6453 goto err_fib_node_entry_link;
6454 }
6455
6456 /* Nothing to replace */
6457 if (!replaced)
6458 return 0;
6459
6460 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6461 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6462 common);
6463 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6464
6465 return 0;
6466
6467 err_fib_node_entry_link:
6468 fib_node->fib_entry = replaced;
6469 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6470 err_fib4_entry_create:
6471 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6472 return err;
6473 }
6474
6475 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6476 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6477 struct fib_entry_notifier_info *fen_info)
6478 {
6479 struct mlxsw_sp_fib4_entry *fib4_entry;
6480 struct mlxsw_sp_fib_node *fib_node;
6481 int err;
6482
6483 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6484 if (!fib4_entry)
6485 return 0;
6486 fib_node = fib4_entry->common.fib_node;
6487
6488 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6489 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6490 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6491 return err;
6492 }
6493
6494 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6495 {
6496 /* Multicast routes aren't supported, so ignore them. Neighbour
6497 * Discovery packets are specifically trapped.
6498 */
6499 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6500 return true;
6501
6502 /* Cloned routes are irrelevant in the forwarding path. */
6503 if (rt->fib6_flags & RTF_CACHE)
6504 return true;
6505
6506 return false;
6507 }
6508
6509 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6510 {
6511 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6512
6513 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6514 if (!mlxsw_sp_rt6)
6515 return ERR_PTR(-ENOMEM);
6516
6517 /* In case of route replace, the replaced route is deleted with
6518 * no notification. Take a reference to prevent accessing freed
6519 * memory.
6520 */
6521 mlxsw_sp_rt6->rt = rt;
6522 fib6_info_hold(rt);
6523
6524 return mlxsw_sp_rt6;
6525 }
6526
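/* fib6_info_release() is only available when IPv6 is enabled, so provide
 * an empty stub for the release helper otherwise.
 */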
6527 #if IS_ENABLED(CONFIG_IPV6)
6528 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6529 {
6530 fib6_info_release(rt);
6531 }
6532 #else
6533 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6534 {
6535 }
6536 #endif
6537
6538 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6539 {
6540 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6541
6542 if (!mlxsw_sp_rt6->rt->nh)
6543 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6544 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6545 kfree(mlxsw_sp_rt6);
6546 }
6547
6548 static struct fib6_info *
6549 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6550 {
6551 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6552 list)->rt;
6553 }
6554
6555 static struct mlxsw_sp_rt6 *
6556 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6557 const struct fib6_info *rt)
6558 {
6559 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6560
6561 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6562 if (mlxsw_sp_rt6->rt == rt)
6563 return mlxsw_sp_rt6;
6564 }
6565
6566 return NULL;
6567 }
6568
6569 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6570 const struct fib6_info *rt,
6571 enum mlxsw_sp_ipip_type *ret)
6572 {
6573 return rt->fib6_nh->fib_nh_dev &&
6574 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6575 }
6576
6577 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6578 struct mlxsw_sp_nexthop_group *nh_grp,
6579 struct mlxsw_sp_nexthop *nh,
6580 const struct fib6_info *rt)
6581 {
6582 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6583 int err;
6584
6585 nh->nhgi = nh_grp->nhgi;
6586 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6587 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6588 #if IS_ENABLED(CONFIG_IPV6)
6589 nh->neigh_tbl = &nd_tbl;
6590 #endif
6591 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6592
6593 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6594
6595 if (!dev)
6596 return 0;
6597 nh->ifindex = dev->ifindex;
6598
6599 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6600 if (err)
6601 goto err_nexthop_type_init;
6602
6603 return 0;
6604
6605 err_nexthop_type_init:
6606 list_del(&nh->router_list_node);
6607 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6608 return err;
6609 }
6610
6611 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6612 struct mlxsw_sp_nexthop *nh)
6613 {
6614 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6615 list_del(&nh->router_list_node);
6616 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6617 }
6618
6619 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6620 const struct fib6_info *rt)
6621 {
6622 return rt->fib6_nh->fib_nh_gw_family ||
6623 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6624 }
6625
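/* Build the nexthop group info from the entry's rt6 list and program the
 * group to the device via the group refresh.
 */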
6626 static int
6627 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6628 struct mlxsw_sp_nexthop_group *nh_grp,
6629 struct mlxsw_sp_fib6_entry *fib6_entry)
6630 {
6631 struct mlxsw_sp_nexthop_group_info *nhgi;
6632 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6633 struct mlxsw_sp_nexthop *nh;
6634 int err, i;
6635
6636 nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6637 GFP_KERNEL);
6638 if (!nhgi)
6639 return -ENOMEM;
6640 nh_grp->nhgi = nhgi;
6641 nhgi->nh_grp = nh_grp;
6642 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6643 struct mlxsw_sp_rt6, list);
6644 nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6645 nhgi->count = fib6_entry->nrt6;
6646 for (i = 0; i < nhgi->count; i++) {
6647 struct fib6_info *rt = mlxsw_sp_rt6->rt;
6648
6649 nh = &nhgi->nexthops[i];
6650 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6651 if (err)
6652 goto err_nexthop6_init;
6653 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6654 }
6655 nh_grp->nhgi = nhgi;
6656 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6657 if (err)
6658 goto err_group_refresh;
6659
6660 return 0;
6661
6662 err_group_refresh:
6663 i = nhgi->count;
6664 err_nexthop6_init:
6665 for (i--; i >= 0; i--) {
6666 nh = &nhgi->nexthops[i];
6667 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6668 }
6669 kfree(nhgi);
6670 return err;
6671 }
6672
6673 static void
6674 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6675 struct mlxsw_sp_nexthop_group *nh_grp)
6676 {
6677 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6678 int i;
6679
6680 for (i = nhgi->count - 1; i >= 0; i--) {
6681 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6682
6683 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6684 }
6685 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6686 WARN_ON_ONCE(nhgi->adj_index_valid);
6687 kfree(nhgi);
6688 }
6689
6690 static struct mlxsw_sp_nexthop_group *
6691 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6692 struct mlxsw_sp_fib6_entry *fib6_entry)
6693 {
6694 struct mlxsw_sp_nexthop_group *nh_grp;
6695 int err;
6696
6697 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6698 if (!nh_grp)
6699 return ERR_PTR(-ENOMEM);
6700 INIT_LIST_HEAD(&nh_grp->vr_list);
6701 err = rhashtable_init(&nh_grp->vr_ht,
6702 &mlxsw_sp_nexthop_group_vr_ht_params);
6703 if (err)
6704 goto err_nexthop_group_vr_ht_init;
6705 INIT_LIST_HEAD(&nh_grp->fib_list);
6706 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6707
6708 err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6709 if (err)
6710 goto err_nexthop_group_info_init;
6711
6712 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6713 if (err)
6714 goto err_nexthop_group_insert;
6715
6716 nh_grp->can_destroy = true;
6717
6718 return nh_grp;
6719
6720 err_nexthop_group_insert:
6721 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6722 err_nexthop_group_info_init:
6723 rhashtable_destroy(&nh_grp->vr_ht);
6724 err_nexthop_group_vr_ht_init:
6725 kfree(nh_grp);
6726 return ERR_PTR(err);
6727 }
6728
6729 static void
6730 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6731 struct mlxsw_sp_nexthop_group *nh_grp)
6732 {
6733 if (!nh_grp->can_destroy)
6734 return;
6735 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6736 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6737 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6738 rhashtable_destroy(&nh_grp->vr_ht);
6739 kfree(nh_grp);
6740 }
6741
6742 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6743 struct mlxsw_sp_fib6_entry *fib6_entry)
6744 {
6745 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6746 struct mlxsw_sp_nexthop_group *nh_grp;
6747
6748 if (rt->nh) {
6749 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6750 rt->nh->id);
6751 if (WARN_ON_ONCE(!nh_grp))
6752 return -EINVAL;
6753 goto out;
6754 }
6755
6756 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6757 if (!nh_grp) {
6758 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6759 if (IS_ERR(nh_grp))
6760 return PTR_ERR(nh_grp);
6761 }
6762
6763 /* The route and the nexthop are described by the same struct, so we
6764 * need to update the nexthop offload indication for the new route.
6765 */
6766 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6767
6768 out:
6769 list_add_tail(&fib6_entry->common.nexthop_group_node,
6770 &nh_grp->fib_list);
6771 fib6_entry->common.nh_group = nh_grp;
6772
6773 return 0;
6774 }
6775
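/* Drop the entry's reference to its nexthop group. The group is destroyed
 * once no FIB entry uses it; object-based groups are destroyed through
 * their own helper.
 */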
6776 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6777 struct mlxsw_sp_fib_entry *fib_entry)
6778 {
6779 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6780
6781 list_del(&fib_entry->nexthop_group_node);
6782 if (!list_empty(&nh_grp->fib_list))
6783 return;
6784
6785 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6786 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6787 return;
6788 }
6789
6790 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6791 }
6792
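/* Switch the entry to a nexthop group matching its current rt6 list,
 * updating the device if the entry is offloaded. On error, roll the
 * entry back to the old group.
 */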
6793 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6794 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6795 struct mlxsw_sp_fib6_entry *fib6_entry)
6796 {
6797 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6798 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6799 int err;
6800
6801 mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6802 fib6_entry->common.nh_group = NULL;
6803 list_del(&fib6_entry->common.nexthop_group_node);
6804
6805 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6806 if (err)
6807 goto err_nexthop6_group_get;
6808
6809 err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6810 fib_node->fib);
6811 if (err)
6812 goto err_nexthop_group_vr_link;
6813
6814 /* If this entry is offloaded, the adjacency index
6815 * currently associated with it in the device's table is that
6816 * of the old group. Start using the new one instead.
6817 */
6818 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
6819 &fib6_entry->common, false);
6820 if (err)
6821 goto err_fib_entry_update;
6822
6823 if (list_empty(&old_nh_grp->fib_list))
6824 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6825
6826 return 0;
6827
6828 err_fib_entry_update:
6829 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6830 fib_node->fib);
6831 err_nexthop_group_vr_link:
6832 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6833 err_nexthop6_group_get:
6834 list_add_tail(&fib6_entry->common.nexthop_group_node,
6835 &old_nh_grp->fib_list);
6836 fib6_entry->common.nh_group = old_nh_grp;
6837 mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6838 return err;
6839 }
6840
6841 static int
6842 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6843 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6844 struct mlxsw_sp_fib6_entry *fib6_entry,
6845 struct fib6_info **rt_arr, unsigned int nrt6)
6846 {
6847 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6848 int err, i;
6849
6850 for (i = 0; i < nrt6; i++) {
6851 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6852 if (IS_ERR(mlxsw_sp_rt6)) {
6853 err = PTR_ERR(mlxsw_sp_rt6);
6854 goto err_rt6_create;
6855 }
6856
6857 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6858 fib6_entry->nrt6++;
6859 }
6860
6861 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6862 if (err)
6863 goto err_nexthop6_group_update;
6864
6865 return 0;
6866
6867 err_nexthop6_group_update:
6868 i = nrt6;
6869 err_rt6_create:
6870 for (i--; i >= 0; i--) {
6871 fib6_entry->nrt6--;
6872 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6873 struct mlxsw_sp_rt6, list);
6874 list_del(&mlxsw_sp_rt6->list);
6875 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6876 }
6877 return err;
6878 }
6879
6880 static void
6881 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6882 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6883 struct mlxsw_sp_fib6_entry *fib6_entry,
6884 struct fib6_info **rt_arr, unsigned int nrt6)
6885 {
6886 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6887 int i;
6888
6889 for (i = 0; i < nrt6; i++) {
6890 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6891 rt_arr[i]);
6892 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6893 continue;
6894
6895 fib6_entry->nrt6--;
6896 list_del(&mlxsw_sp_rt6->list);
6897 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6898 }
6899
6900 mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6901 }
6902
6903 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6904 struct mlxsw_sp_fib_entry *fib_entry,
6905 const struct fib6_info *rt)
6906 {
6907 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
6908 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6909 else if (rt->fib6_type == RTN_BLACKHOLE)
6910 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6911 else if (rt->fib6_flags & RTF_REJECT)
6912 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6913 else if (fib_entry->nh_group->nhgi->gateway)
6914 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6915 else
6916 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6917 }
6918
6919 static void
6920 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
6921 {
6922 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
6923
6924 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
6925 list) {
6926 fib6_entry->nrt6--;
6927 list_del(&mlxsw_sp_rt6->list);
6928 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6929 }
6930 }
6931
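/* Create a FIB entry aggregating all the routes in rt_arr. The array
 * holds a route and its multipath siblings, which share the same prefix.
 */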
6932 static struct mlxsw_sp_fib6_entry *
6933 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
6934 struct mlxsw_sp_fib_node *fib_node,
6935 struct fib6_info **rt_arr, unsigned int nrt6)
6936 {
6937 struct mlxsw_sp_fib6_entry *fib6_entry;
6938 struct mlxsw_sp_fib_entry *fib_entry;
6939 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6940 int err, i;
6941
6942 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
6943 if (!fib6_entry)
6944 return ERR_PTR(-ENOMEM);
6945 fib_entry = &fib6_entry->common;
6946
6947 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6948 if (IS_ERR(fib_entry->priv)) {
6949 err = PTR_ERR(fib_entry->priv);
6950 goto err_fib_entry_priv_create;
6951 }
6952
6953 INIT_LIST_HEAD(&fib6_entry->rt6_list);
6954
6955 for (i = 0; i < nrt6; i++) {
6956 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6957 if (IS_ERR(mlxsw_sp_rt6)) {
6958 err = PTR_ERR(mlxsw_sp_rt6);
6959 goto err_rt6_create;
6960 }
6961 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6962 fib6_entry->nrt6++;
6963 }
6964
6965 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6966 if (err)
6967 goto err_nexthop6_group_get;
6968
6969 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6970 fib_node->fib);
6971 if (err)
6972 goto err_nexthop_group_vr_link;
6973
6974 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
6975
6976 fib_entry->fib_node = fib_node;
6977
6978 return fib6_entry;
6979
6980 err_nexthop_group_vr_link:
6981 mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
6982 err_nexthop6_group_get:
6983 i = nrt6;
6984 err_rt6_create:
6985 for (i--; i >= 0; i--) {
6986 fib6_entry->nrt6--;
6987 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6988 struct mlxsw_sp_rt6, list);
6989 list_del(&mlxsw_sp_rt6->list);
6990 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6991 }
6992 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6993 err_fib_entry_priv_create:
6994 kfree(fib6_entry);
6995 return ERR_PTR(err);
6996 }
6997
6998 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6999 struct mlxsw_sp_fib6_entry *fib6_entry)
7000 {
7001 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7002
7003 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7004 fib_node->fib);
7005 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7006 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7007 WARN_ON(fib6_entry->nrt6);
7008 mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
7009 kfree(fib6_entry);
7010 }
7011
7012 static struct mlxsw_sp_fib6_entry *
7013 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7014 const struct fib6_info *rt)
7015 {
7016 struct mlxsw_sp_fib6_entry *fib6_entry;
7017 struct mlxsw_sp_fib_node *fib_node;
7018 struct mlxsw_sp_fib *fib;
7019 struct fib6_info *cmp_rt;
7020 struct mlxsw_sp_vr *vr;
7021
7022 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7023 if (!vr)
7024 return NULL;
7025 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7026
7027 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7028 sizeof(rt->fib6_dst.addr),
7029 rt->fib6_dst.plen);
7030 if (!fib_node)
7031 return NULL;
7032
7033 fib6_entry = container_of(fib_node->fib_entry,
7034 struct mlxsw_sp_fib6_entry, common);
7035 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7036 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7037 rt->fib6_metric == cmp_rt->fib6_metric &&
7038 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7039 return fib6_entry;
7040
7041 return NULL;
7042 }
7043
7044 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7045 {
7046 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7047 struct mlxsw_sp_fib6_entry *fib6_replaced;
7048 struct fib6_info *rt, *rt_replaced;
7049
7050 if (!fib_node->fib_entry)
7051 return true;
7052
7053 fib6_replaced = container_of(fib_node->fib_entry,
7054 struct mlxsw_sp_fib6_entry,
7055 common);
7056 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7057 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7058 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7059 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7060 return false;
7061
7062 return true;
7063 }
7064
7065 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7066 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7067 struct fib6_info **rt_arr, unsigned int nrt6)
7068 {
7069 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7070 struct mlxsw_sp_fib_entry *replaced;
7071 struct mlxsw_sp_fib_node *fib_node;
7072 struct fib6_info *rt = rt_arr[0];
7073 int err;
7074
7075 if (rt->fib6_src.plen)
7076 return -EINVAL;
7077
7078 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7079 return 0;
7080
7081 if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7082 return 0;
7083
7084 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7085 &rt->fib6_dst.addr,
7086 sizeof(rt->fib6_dst.addr),
7087 rt->fib6_dst.plen,
7088 MLXSW_SP_L3_PROTO_IPV6);
7089 if (IS_ERR(fib_node))
7090 return PTR_ERR(fib_node);
7091
7092 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7093 nrt6);
7094 if (IS_ERR(fib6_entry)) {
7095 err = PTR_ERR(fib6_entry);
7096 goto err_fib6_entry_create;
7097 }
7098
7099 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7100 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7101 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7102 return 0;
7103 }
7104
7105 replaced = fib_node->fib_entry;
7106 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
7107 if (err)
7108 goto err_fib_node_entry_link;
7109
7110 /* Nothing to replace */
7111 if (!replaced)
7112 return 0;
7113
7114 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7115 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7116 common);
7117 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7118
7119 return 0;
7120
7121 err_fib_node_entry_link:
7122 fib_node->fib_entry = replaced;
7123 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7124 err_fib6_entry_create:
7125 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7126 return err;
7127 }
7128
7129 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7130 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7131 struct fib6_info **rt_arr, unsigned int nrt6)
7132 {
7133 struct mlxsw_sp_fib6_entry *fib6_entry;
7134 struct mlxsw_sp_fib_node *fib_node;
7135 struct fib6_info *rt = rt_arr[0];
7136 int err;
7137
7138 if (rt->fib6_src.plen)
7139 return -EINVAL;
7140
7141 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7142 return 0;
7143
7144 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7145 &rt->fib6_dst.addr,
7146 sizeof(rt->fib6_dst.addr),
7147 rt->fib6_dst.plen,
7148 MLXSW_SP_L3_PROTO_IPV6);
7149 if (IS_ERR(fib_node))
7150 return PTR_ERR(fib_node);
7151
7152 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7153 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7154 return -EINVAL;
7155 }
7156
7157 fib6_entry = container_of(fib_node->fib_entry,
7158 struct mlxsw_sp_fib6_entry, common);
7159 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7160 if (err)
7161 goto err_fib6_entry_nexthop_add;
7162
7163 return 0;
7164
7165 err_fib6_entry_nexthop_add:
7166 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7167 return err;
7168 }
7169
7170 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7171 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7172 struct fib6_info **rt_arr, unsigned int nrt6)
7173 {
7174 struct mlxsw_sp_fib6_entry *fib6_entry;
7175 struct mlxsw_sp_fib_node *fib_node;
7176 struct fib6_info *rt = rt_arr[0];
7177 int err;
7178
7179 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7180 return 0;
7181
7182 /* Multipath routes are first added to the FIB trie and only then
7183 * notified. If we vetoed the addition, we will get a delete
7184 * notification for a route we do not have. Therefore, do not warn if
7185 * the route was not found.
7186 */
7187 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7188 if (!fib6_entry)
7189 return 0;
7190
7191 /* If not all the nexthops are deleted, then only reduce the nexthop
7192 * group.
7193 */
7194 if (nrt6 != fib6_entry->nrt6) {
7195 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7196 return 0;
7197 }
7198
7199 fib_node = fib6_entry->common.fib_node;
7200
7201 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
7202 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7203 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7204 return err;
7205 }
7206
7207 static struct mlxsw_sp_mr_table *
7208 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7209 {
7210 if (family == RTNL_FAMILY_IPMR)
7211 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7212 else
7213 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7214 }
7215
7216 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7217 struct mfc_entry_notifier_info *men_info,
7218 bool replace)
7219 {
7220 struct mlxsw_sp_mr_table *mrt;
7221 struct mlxsw_sp_vr *vr;
7222
7223 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7224 if (IS_ERR(vr))
7225 return PTR_ERR(vr);
7226
7227 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7228 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7229 }
7230
7231 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7232 struct mfc_entry_notifier_info *men_info)
7233 {
7234 struct mlxsw_sp_mr_table *mrt;
7235 struct mlxsw_sp_vr *vr;
7236
7237 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7238 if (WARN_ON(!vr))
7239 return;
7240
7241 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7242 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7243 mlxsw_sp_vr_put(mlxsw_sp, vr);
7244 }
7245
7246 static int
7247 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7248 struct vif_entry_notifier_info *ven_info)
7249 {
7250 struct mlxsw_sp_mr_table *mrt;
7251 struct mlxsw_sp_rif *rif;
7252 struct mlxsw_sp_vr *vr;
7253
7254 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7255 if (IS_ERR(vr))
7256 return PTR_ERR(vr);
7257
7258 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7259 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7260 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7261 ven_info->vif_index,
7262 ven_info->vif_flags, rif);
7263 }
7264
7265 static void
7266 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7267 struct vif_entry_notifier_info *ven_info)
7268 {
7269 struct mlxsw_sp_mr_table *mrt;
7270 struct mlxsw_sp_vr *vr;
7271
7272 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7273 if (WARN_ON(!vr))
7274 return;
7275
7276 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7277 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7278 mlxsw_sp_vr_put(mlxsw_sp, vr);
7279 }
7280
7281 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7282 struct mlxsw_sp_fib_node *fib_node)
7283 {
7284 struct mlxsw_sp_fib4_entry *fib4_entry;
7285
7286 fib4_entry = container_of(fib_node->fib_entry,
7287 struct mlxsw_sp_fib4_entry, common);
7288 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7289 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7290 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7291 }
7292
7293 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7294 struct mlxsw_sp_fib_node *fib_node)
7295 {
7296 struct mlxsw_sp_fib6_entry *fib6_entry;
7297
7298 fib6_entry = container_of(fib_node->fib_entry,
7299 struct mlxsw_sp_fib6_entry, common);
7300 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7301 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7302 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7303 }
7304
7305 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7306 struct mlxsw_sp_fib_node *fib_node)
7307 {
7308 switch (fib_node->fib->proto) {
7309 case MLXSW_SP_L3_PROTO_IPV4:
7310 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7311 break;
7312 case MLXSW_SP_L3_PROTO_IPV6:
7313 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7314 break;
7315 }
7316 }
7317
7318 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7319 struct mlxsw_sp_vr *vr,
7320 enum mlxsw_sp_l3proto proto)
7321 {
7322 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7323 struct mlxsw_sp_fib_node *fib_node, *tmp;
7324
7325 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7326 bool do_break = &tmp->list == &fib->node_list;
7327
7328 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7329 if (do_break)
7330 break;
7331 }
7332 }
7333
7334 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7335 {
7336 int i, j;
7337
7338 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7339 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7340
7341 if (!mlxsw_sp_vr_is_used(vr))
7342 continue;
7343
7344 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7345 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7346 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7347
7348 /* If the virtual router was only used for IPv4, then it's no
7349 * longer used.
7350 */
7351 if (!mlxsw_sp_vr_is_used(vr))
7352 continue;
7353 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7354 }
7355
7356 /* After flushing all the routes, no one can still be using the
7357 * adjacency index that is discarding packets, so free it in case it
7358 * was allocated.
7359 */
7360 if (!mlxsw_sp->router->adj_discard_index_valid)
7361 return;
7362 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
7363 mlxsw_sp->router->adj_discard_index);
7364 mlxsw_sp->router->adj_discard_index_valid = false;
7365 }
7366
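/* FIB events are received in atomic context (see the notifier below), so
 * they are queued on a list and handled later in process context by the
 * FIB event work item.
 */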
7367 struct mlxsw_sp_fib6_event {
7368 struct fib6_info **rt_arr;
7369 unsigned int nrt6;
7370 };
7371
7372 struct mlxsw_sp_fib_event {
7373 struct list_head list; /* node in fib queue */
7374 union {
7375 struct mlxsw_sp_fib6_event fib6_event;
7376 struct fib_entry_notifier_info fen_info;
7377 struct fib_rule_notifier_info fr_info;
7378 struct fib_nh_notifier_info fnh_info;
7379 struct mfc_entry_notifier_info men_info;
7380 struct vif_entry_notifier_info ven_info;
7381 };
7382 struct mlxsw_sp *mlxsw_sp;
7383 unsigned long event;
7384 int family;
7385 };
7386
7387 static int
7388 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
7389 struct fib6_entry_notifier_info *fen6_info)
7390 {
7391 struct fib6_info *rt = fen6_info->rt;
7392 struct fib6_info **rt_arr;
7393 struct fib6_info *iter;
7394 unsigned int nrt6;
7395 int i = 0;
7396
7397 nrt6 = fen6_info->nsiblings + 1;
7398
7399 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7400 if (!rt_arr)
7401 return -ENOMEM;
7402
7403 fib6_event->rt_arr = rt_arr;
7404 fib6_event->nrt6 = nrt6;
7405
7406 rt_arr[0] = rt;
7407 fib6_info_hold(rt);
7408
7409 if (!fen6_info->nsiblings)
7410 return 0;
7411
7412 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7413 if (i == fen6_info->nsiblings)
7414 break;
7415
7416 rt_arr[i + 1] = iter;
7417 fib6_info_hold(iter);
7418 i++;
7419 }
7420 WARN_ON_ONCE(i != fen6_info->nsiblings);
7421
7422 return 0;
7423 }
7424
7425 static void
7426 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7427 {
7428 int i;
7429
7430 for (i = 0; i < fib6_event->nrt6; i++)
7431 mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7432 kfree(fib6_event->rt_arr);
7433 }
7434
7435 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
7436 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7437 struct mlxsw_sp_fib_event *fib_event)
7438 {
7439 int err;
7440
7441 mlxsw_sp_span_respin(mlxsw_sp);
7442
7443 switch (fib_event->event) {
7444 case FIB_EVENT_ENTRY_REPLACE:
7445 err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
7446 if (err) {
7447 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7448 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7449 mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7450 &fib_event->fen_info);
7451 }
7452 fib_info_put(fib_event->fen_info.fi);
7453 break;
7454 case FIB_EVENT_ENTRY_DEL:
7455 err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
7456 if (err)
7457 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7458 fib_info_put(fib_event->fen_info.fi);
7459 break;
7460 case FIB_EVENT_NH_ADD:
7461 case FIB_EVENT_NH_DEL:
7462 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
7463 fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
7464 break;
7465 }
7466 }
7467
7468 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
7469 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7470 struct mlxsw_sp_fib_event *fib_event)
7471 {
7472 struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
7473 int err;
7474
7475 mlxsw_sp_span_respin(mlxsw_sp);
7476
7477 switch (fib_event->event) {
7478 case FIB_EVENT_ENTRY_REPLACE:
7479 err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7480 fib_event->fib6_event.nrt6);
7481 if (err) {
7482 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7483 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7484 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7485 fib6_event->rt_arr,
7486 fib6_event->nrt6);
7487 }
7488 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7489 break;
7490 case FIB_EVENT_ENTRY_APPEND:
7491 err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7492 fib_event->fib6_event.nrt6);
7493 if (err) {
7494 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7495 dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7496 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7497 fib6_event->rt_arr,
7498 fib6_event->nrt6);
7499 }
7500 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7501 break;
7502 case FIB_EVENT_ENTRY_DEL:
7503 err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7504 fib_event->fib6_event.nrt6);
7505 if (err)
7506 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7507 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7508 break;
7509 }
7510 }
7511
7512 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
7513 struct mlxsw_sp_fib_event *fib_event)
7514 {
7515 bool replace;
7516 int err;
7517
7518 rtnl_lock();
7519 mutex_lock(&mlxsw_sp->router->lock);
7520 switch (fib_event->event) {
7521 case FIB_EVENT_ENTRY_REPLACE:
7522 case FIB_EVENT_ENTRY_ADD:
7523 replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
7524
7525 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
7526 if (err)
7527 dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7528 mr_cache_put(fib_event->men_info.mfc);
7529 break;
7530 case FIB_EVENT_ENTRY_DEL:
7531 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
7532 mr_cache_put(fib_event->men_info.mfc);
7533 break;
7534 case FIB_EVENT_VIF_ADD:
7535 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7536 &fib_event->ven_info);
7537 if (err)
7538 dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7539 dev_put(fib_event->ven_info.dev);
7540 break;
7541 case FIB_EVENT_VIF_DEL:
7542 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
7543 dev_put(fib_event->ven_info.dev);
7544 break;
7545 }
7546 mutex_unlock(&mlxsw_sp->router->lock);
7547 rtnl_unlock();
7548 }
7549
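/* Process all queued FIB events under the router lock. Consecutive events
 * of the same family and type may be bulked into a single register write
 * using the operation context.
 */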
7550 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7551 {
7552 struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7553 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7554 struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7555 struct mlxsw_sp_fib_event *next_fib_event;
7556 struct mlxsw_sp_fib_event *fib_event;
7557 int last_family = AF_UNSPEC;
7558 LIST_HEAD(fib_event_queue);
7559
7560 spin_lock_bh(&router->fib_event_queue_lock);
7561 list_splice_init(&router->fib_event_queue, &fib_event_queue);
7562 spin_unlock_bh(&router->fib_event_queue_lock);
7563
7564 /* The router lock is held here to make sure the per-instance
7565 * operation context is not used by anyone else while FIB4/6
7566 * events are being processed.
7567 */
7568 mutex_lock(&router->lock);
7569 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7570 list_for_each_entry_safe(fib_event, next_fib_event,
7571 &fib_event_queue, list) {
7572 /* Check if the next entry in the queue exists and is
7573 * of the same type (family and event) as the current one.
7574 * In that case it is permitted to do the bulking
7575 * of multiple FIB entries to a single register write.
7576 */
7577 op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7578 fib_event->family == next_fib_event->family &&
7579 fib_event->event == next_fib_event->event;
7580 op_ctx->event = fib_event->event;
7581
7582 /* If the family of this entry differs from the previous one's,
7583 * the context needs to be reinitialized now; indicate that.
7584 * Note that since last_family is initialized to AF_UNSPEC, this is always
7585 * going to happen for the first entry processed in the work.
7586 */
7587 if (fib_event->family != last_family)
7588 op_ctx->initialized = false;
7589
7590 switch (fib_event->family) {
7591 case AF_INET:
7592 mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7593 fib_event);
7594 break;
7595 case AF_INET6:
7596 mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7597 fib_event);
7598 break;
7599 case RTNL_FAMILY_IP6MR:
7600 case RTNL_FAMILY_IPMR:
7601 /* Unlock here as inside FIBMR the lock is taken again
7602 * under RTNL. The per-instance operation context
7603 * is not used by FIBMR.
7604 */
7605 mutex_unlock(&router->lock);
7606 mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7607 fib_event);
7608 mutex_lock(&router->lock);
7609 break;
7610 default:
7611 WARN_ON_ONCE(1);
7612 }
7613 last_family = fib_event->family;
7614 kfree(fib_event);
7615 cond_resched();
7616 }
7617 WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7618 mutex_unlock(&router->lock);
7619 }
7620
7621 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7622 struct fib_notifier_info *info)
7623 {
7624 struct fib_entry_notifier_info *fen_info;
7625 struct fib_nh_notifier_info *fnh_info;
7626
7627 switch (fib_event->event) {
7628 case FIB_EVENT_ENTRY_REPLACE:
7629 case FIB_EVENT_ENTRY_DEL:
7630 fen_info = container_of(info, struct fib_entry_notifier_info,
7631 info);
7632 fib_event->fen_info = *fen_info;
7633 /* Take a reference on the fib_info to prevent it from being
7634 * freed while the event is queued. Release it afterwards.
7635 */
7636 fib_info_hold(fib_event->fen_info.fi);
7637 break;
7638 case FIB_EVENT_NH_ADD:
7639 case FIB_EVENT_NH_DEL:
7640 fnh_info = container_of(info, struct fib_nh_notifier_info,
7641 info);
7642 fib_event->fnh_info = *fnh_info;
7643 fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7644 break;
7645 }
7646 }
7647
7648 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7649 struct fib_notifier_info *info)
7650 {
7651 struct fib6_entry_notifier_info *fen6_info;
7652 int err;
7653
7654 switch (fib_event->event) {
7655 case FIB_EVENT_ENTRY_REPLACE:
7656 case FIB_EVENT_ENTRY_APPEND:
7657 case FIB_EVENT_ENTRY_DEL:
7658 fen6_info = container_of(info, struct fib6_entry_notifier_info,
7659 info);
7660 err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7661 fen6_info);
7662 if (err)
7663 return err;
7664 break;
7665 }
7666
7667 return 0;
7668 }
7669
7670 static void
7671 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7672 struct fib_notifier_info *info)
7673 {
7674 switch (fib_event->event) {
7675 case FIB_EVENT_ENTRY_REPLACE:
7676 case FIB_EVENT_ENTRY_ADD:
7677 case FIB_EVENT_ENTRY_DEL:
7678 memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7679 mr_cache_hold(fib_event->men_info.mfc);
7680 break;
7681 case FIB_EVENT_VIF_ADD:
7682 case FIB_EVENT_VIF_DEL:
7683 memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7684 dev_hold(fib_event->ven_info.dev);
7685 break;
7686 }
7687 }
7688
7689 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7690 struct fib_notifier_info *info,
7691 struct mlxsw_sp *mlxsw_sp)
7692 {
7693 struct netlink_ext_ack *extack = info->extack;
7694 struct fib_rule_notifier_info *fr_info;
7695 struct fib_rule *rule;
7696 int err = 0;
7697
7698 /* nothing to do at the moment */
7699 if (event == FIB_EVENT_RULE_DEL)
7700 return 0;
7701
7702 fr_info = container_of(info, struct fib_rule_notifier_info, info);
7703 rule = fr_info->rule;
7704
7705 /* Rule only affects locally generated traffic */
7706 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7707 return 0;
7708
7709 switch (info->family) {
7710 case AF_INET:
7711 if (!fib4_rule_default(rule) && !rule->l3mdev)
7712 err = -EOPNOTSUPP;
7713 break;
7714 case AF_INET6:
7715 if (!fib6_rule_default(rule) && !rule->l3mdev)
7716 err = -EOPNOTSUPP;
7717 break;
7718 case RTNL_FAMILY_IPMR:
7719 if (!ipmr_rule_default(rule) && !rule->l3mdev)
7720 err = -EOPNOTSUPP;
7721 break;
7722 case RTNL_FAMILY_IP6MR:
7723 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7724 err = -EOPNOTSUPP;
7725 break;
7726 }
7727
7728 if (err < 0)
7729 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7730
7731 return err;
7732 }
7733
7734 /* Called with rcu_read_lock() */
7735 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7736 unsigned long event, void *ptr)
7737 {
7738 struct mlxsw_sp_fib_event *fib_event;
7739 struct fib_notifier_info *info = ptr;
7740 struct mlxsw_sp_router *router;
7741 int err;
7742
7743 if (info->family != AF_INET && info->family != AF_INET6 &&
7744 info->family != RTNL_FAMILY_IPMR &&
7745 info->family != RTNL_FAMILY_IP6MR)
7746 return NOTIFY_DONE;
7747
7748 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7749
7750 switch (event) {
7751 case FIB_EVENT_RULE_ADD:
7752 case FIB_EVENT_RULE_DEL:
7753 err = mlxsw_sp_router_fib_rule_event(event, info,
7754 router->mlxsw_sp);
7755 return notifier_from_errno(err);
7756 case FIB_EVENT_ENTRY_ADD:
7757 case FIB_EVENT_ENTRY_REPLACE:
7758 case FIB_EVENT_ENTRY_APPEND:
7759 if (info->family == AF_INET) {
7760 struct fib_entry_notifier_info *fen_info = ptr;
7761
7762 if (fen_info->fi->fib_nh_is_v6) {
7763 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7764 return notifier_from_errno(-EINVAL);
7765 }
7766 }
7767 break;
7768 }
7769
7770 fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7771 if (!fib_event)
7772 return NOTIFY_BAD;
7773
7774 fib_event->mlxsw_sp = router->mlxsw_sp;
7775 fib_event->event = event;
7776 fib_event->family = info->family;
7777
7778 switch (info->family) {
7779 case AF_INET:
7780 mlxsw_sp_router_fib4_event(fib_event, info);
7781 break;
7782 case AF_INET6:
7783 err = mlxsw_sp_router_fib6_event(fib_event, info);
7784 if (err)
7785 goto err_fib_event;
7786 break;
7787 case RTNL_FAMILY_IP6MR:
7788 case RTNL_FAMILY_IPMR:
7789 mlxsw_sp_router_fibmr_event(fib_event, info);
7790 break;
7791 }
7792
7793 /* Enqueue the event and trigger the work */
7794 spin_lock_bh(&router->fib_event_queue_lock);
7795 list_add_tail(&fib_event->list, &router->fib_event_queue);
7796 spin_unlock_bh(&router->fib_event_queue_lock);
7797 mlxsw_core_schedule_work(&router->fib_event_work);
7798
7799 return NOTIFY_DONE;
7800
7801 err_fib_event:
7802 kfree(fib_event);
7803 return NOTIFY_BAD;
7804 }
7805
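/* RIF lookup by netdevice is a linear walk over the RIF table, bounded by
 * the MAX_RIFS resource.
 */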
7806 static struct mlxsw_sp_rif *
7807 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7808 const struct net_device *dev)
7809 {
7810 int i;
7811
7812 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7813 if (mlxsw_sp->router->rifs[i] &&
7814 mlxsw_sp->router->rifs[i]->dev == dev)
7815 return mlxsw_sp->router->rifs[i];
7816
7817 return NULL;
7818 }
7819
7820 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7821 const struct net_device *dev)
7822 {
7823 struct mlxsw_sp_rif *rif;
7824
7825 mutex_lock(&mlxsw_sp->router->lock);
7826 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7827 mutex_unlock(&mlxsw_sp->router->lock);
7828
7829 return rif;
7830 }
7831
7832 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7833 {
7834 struct mlxsw_sp_rif *rif;
7835 u16 vid = 0;
7836
7837 mutex_lock(&mlxsw_sp->router->lock);
7838 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7839 if (!rif)
7840 goto out;
7841
7842 /* We only return the VID for VLAN RIFs. Otherwise we return an
7843 * invalid value (0).
7844 */
7845 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7846 goto out;
7847
7848 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7849
7850 out:
7851 mutex_unlock(&mlxsw_sp->router->lock);
7852 return vid;
7853 }
7854
7855 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7856 {
7857 char ritr_pl[MLXSW_REG_RITR_LEN];
7858 int err;
7859
7860 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7861 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7862 if (err)
7863 return err;
7864
7865 mlxsw_reg_ritr_enable_set(ritr_pl, false);
7866 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7867 }
7868
7869 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7870 struct mlxsw_sp_rif *rif)
7871 {
7872 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7873 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7874 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7875 }
7876
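/* Decide whether a RIF needs to be created (NETDEV_UP) or destroyed
 * (NETDEV_DOWN) for a netdevice, based on whether it still has IPv4 or
 * IPv6 addresses assigned.
 */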
7877 static bool
7878 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7879 unsigned long event)
7880 {
7881 struct inet6_dev *inet6_dev;
7882 bool addr_list_empty = true;
7883 struct in_device *idev;
7884
7885 switch (event) {
7886 case NETDEV_UP:
7887 return rif == NULL;
7888 case NETDEV_DOWN:
7889 rcu_read_lock();
7890 idev = __in_dev_get_rcu(dev);
7891 if (idev && idev->ifa_list)
7892 addr_list_empty = false;
7893
7894 inet6_dev = __in6_dev_get(dev);
7895 if (addr_list_empty && inet6_dev &&
7896 !list_empty(&inet6_dev->addr_list))
7897 addr_list_empty = false;
7898 rcu_read_unlock();
7899
7900 /* macvlans do not have a RIF, but rather piggyback on the
7901 * RIF of their lower device.
7902 */
7903 if (netif_is_macvlan(dev) && addr_list_empty)
7904 return true;
7905
7906 if (rif && addr_list_empty &&
7907 !netif_is_l3_slave(rif->dev))
7908 return true;
7909 /* It is possible we already removed the RIF ourselves
7910 * if it was assigned to a netdev that is now a bridge
7911 * or LAG slave.
7912 */
7913 return false;
7914 }
7915
7916 return false;
7917 }
7918
7919 static enum mlxsw_sp_rif_type
7920 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
7921 const struct net_device *dev)
7922 {
7923 enum mlxsw_sp_fid_type type;
7924
7925 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
7926 return MLXSW_SP_RIF_TYPE_IPIP_LB;
7927
7928 /* Otherwise RIF type is derived from the type of the underlying FID. */
7929 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
7930 type = MLXSW_SP_FID_TYPE_8021Q;
7931 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
7932 type = MLXSW_SP_FID_TYPE_8021Q;
7933 else if (netif_is_bridge_master(dev))
7934 type = MLXSW_SP_FID_TYPE_8021D;
7935 else
7936 type = MLXSW_SP_FID_TYPE_RFID;
7937
7938 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
7939 }
7940
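/* Allocate the first free RIF index using a linear scan of the RIF
 * table.
 */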
7941 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
7942 {
7943 int i;
7944
7945 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7946 if (!mlxsw_sp->router->rifs[i]) {
7947 *p_rif_index = i;
7948 return 0;
7949 }
7950 }
7951
7952 return -ENOBUFS;
7953 }
7954
7955 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
7956 u16 vr_id,
7957 struct net_device *l3_dev)
7958 {
7959 struct mlxsw_sp_rif *rif;
7960
7961 rif = kzalloc(rif_size, GFP_KERNEL);
7962 if (!rif)
7963 return NULL;
7964
7965 INIT_LIST_HEAD(&rif->nexthop_list);
7966 INIT_LIST_HEAD(&rif->neigh_list);
7967 if (l3_dev) {
7968 ether_addr_copy(rif->addr, l3_dev->dev_addr);
7969 rif->mtu = l3_dev->mtu;
7970 rif->dev = l3_dev;
7971 }
7972 rif->vr_id = vr_id;
7973 rif->rif_index = rif_index;
7974
7975 return rif;
7976 }
7977
7978 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
7979 u16 rif_index)
7980 {
7981 return mlxsw_sp->router->rifs[rif_index];
7982 }
7983
7984 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
7985 {
7986 return rif->rif_index;
7987 }
7988
7989 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7990 {
7991 return lb_rif->common.rif_index;
7992 }
7993
7994 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7995 {
7996 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
7997 struct mlxsw_sp_vr *ul_vr;
7998
7999 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8000 if (WARN_ON(IS_ERR(ul_vr)))
8001 return 0;
8002
8003 return ul_vr->id;
8004 }
8005
8006 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8007 {
8008 return lb_rif->ul_rif_id;
8009 }
8010
8011 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8012 {
8013 return rif->dev->ifindex;
8014 }
8015
8016 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8017 {
8018 return rif->dev;
8019 }
8020
8021 static struct mlxsw_sp_rif *
8022 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8023 const struct mlxsw_sp_rif_params *params,
8024 struct netlink_ext_ack *extack)
8025 {
8026 u32 tb_id = l3mdev_fib_table(params->dev);
8027 const struct mlxsw_sp_rif_ops *ops;
8028 struct mlxsw_sp_fid *fid = NULL;
8029 enum mlxsw_sp_rif_type type;
8030 struct mlxsw_sp_rif *rif;
8031 struct mlxsw_sp_vr *vr;
8032 u16 rif_index;
8033 int i, err;
8034
8035 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8036 ops = mlxsw_sp->router->rif_ops_arr[type];
8037
8038 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8039 if (IS_ERR(vr))
8040 return ERR_CAST(vr);
8041 vr->rif_count++;
8042
8043 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8044 if (err) {
8045 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8046 goto err_rif_index_alloc;
8047 }
8048
8049 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8050 if (!rif) {
8051 err = -ENOMEM;
8052 goto err_rif_alloc;
8053 }
8054 dev_hold(rif->dev);
8055 mlxsw_sp->router->rifs[rif_index] = rif;
8056 rif->mlxsw_sp = mlxsw_sp;
8057 rif->ops = ops;
8058
8059 if (ops->fid_get) {
8060 fid = ops->fid_get(rif, extack);
8061 if (IS_ERR(fid)) {
8062 err = PTR_ERR(fid);
8063 goto err_fid_get;
8064 }
8065 rif->fid = fid;
8066 }
8067
8068 if (ops->setup)
8069 ops->setup(rif, params);
8070
8071 err = ops->configure(rif);
8072 if (err)
8073 goto err_configure;
8074
8075 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8076 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8077 if (err)
8078 goto err_mr_rif_add;
8079 }
8080
8081 mlxsw_sp_rif_counters_alloc(rif);
8082
8083 return rif;
8084
8085 err_mr_rif_add:
8086 for (i--; i >= 0; i--)
8087 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8088 ops->deconfigure(rif);
8089 err_configure:
8090 if (fid)
8091 mlxsw_sp_fid_put(fid);
8092 err_fid_get:
8093 mlxsw_sp->router->rifs[rif_index] = NULL;
8094 dev_put(rif->dev);
8095 kfree(rif);
8096 err_rif_alloc:
8097 err_rif_index_alloc:
8098 vr->rif_count--;
8099 mlxsw_sp_vr_put(mlxsw_sp, vr);
8100 return ERR_PTR(err);
8101 }
8102
8103 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8104 {
8105 const struct mlxsw_sp_rif_ops *ops = rif->ops;
8106 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8107 struct mlxsw_sp_fid *fid = rif->fid;
8108 struct mlxsw_sp_vr *vr;
8109 int i;
8110
8111 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8112 vr = &mlxsw_sp->router->vrs[rif->vr_id];
8113
8114 mlxsw_sp_rif_counters_free(rif);
8115 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8116 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8117 ops->deconfigure(rif);
8118 if (fid)
8119 /* Loopback RIFs are not associated with a FID. */
8120 mlxsw_sp_fid_put(fid);
8121 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8122 dev_put(rif->dev);
8123 kfree(rif);
8124 vr->rif_count--;
8125 mlxsw_sp_vr_put(mlxsw_sp, vr);
8126 }
8127
8128 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8129 struct net_device *dev)
8130 {
8131 struct mlxsw_sp_rif *rif;
8132
8133 mutex_lock(&mlxsw_sp->router->lock);
8134 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8135 if (!rif)
8136 goto out;
8137 mlxsw_sp_rif_destroy(rif);
8138 out:
8139 mutex_unlock(&mlxsw_sp->router->lock);
8140 }
8141
8142 static void
8143 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8144 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8145 {
8146 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8147
8148 params->vid = mlxsw_sp_port_vlan->vid;
8149 params->lag = mlxsw_sp_port->lagged;
8150 if (params->lag)
8151 params->lag_id = mlxsw_sp_port->lag_id;
8152 else
8153 params->system_port = mlxsw_sp_port->local_port;
8154 }
8155
8156 static struct mlxsw_sp_rif_subport *
8157 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8158 {
8159 return container_of(rif, struct mlxsw_sp_rif_subport, common);
8160 }
8161
8162 static struct mlxsw_sp_rif *
8163 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8164 const struct mlxsw_sp_rif_params *params,
8165 struct netlink_ext_ack *extack)
8166 {
8167 struct mlxsw_sp_rif_subport *rif_subport;
8168 struct mlxsw_sp_rif *rif;
8169
8170 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8171 if (!rif)
8172 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8173
8174 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8175 refcount_inc(&rif_subport->ref_count);
8176 return rif;
8177 }
8178
8179 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8180 {
8181 struct mlxsw_sp_rif_subport *rif_subport;
8182
8183 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8184 if (!refcount_dec_and_test(&rif_subport->ref_count))
8185 return;
8186
8187 mlxsw_sp_rif_destroy(rif);
8188 }
8189
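/* Join a {port, VID} to a router interface: get (or create) the
 * sub-port RIF for @l3_dev, map the rFID to the {port, VID} pair,
 * disable learning for the VID and force its STP state to forwarding.
 */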
8190 static int
8191 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8192 struct net_device *l3_dev,
8193 struct netlink_ext_ack *extack)
8194 {
8195 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8196 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8197 struct mlxsw_sp_rif_params params = {
8198 .dev = l3_dev,
8199 };
8200 u16 vid = mlxsw_sp_port_vlan->vid;
8201 struct mlxsw_sp_rif *rif;
8202 struct mlxsw_sp_fid *fid;
8203 int err;
8204
8205 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8206 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8207 if (IS_ERR(rif))
8208 return PTR_ERR(rif);
8209
8210 /* FID was already created, just take a reference */
8211 fid = rif->ops->fid_get(rif, extack);
8212 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8213 if (err)
8214 goto err_fid_port_vid_map;
8215
8216 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8217 if (err)
8218 goto err_port_vid_learning_set;
8219
8220 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8221 BR_STATE_FORWARDING);
8222 if (err)
8223 goto err_port_vid_stp_set;
8224
8225 mlxsw_sp_port_vlan->fid = fid;
8226
8227 return 0;
8228
8229 err_port_vid_stp_set:
8230 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8231 err_port_vid_learning_set:
8232 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8233 err_fid_port_vid_map:
8234 mlxsw_sp_fid_put(fid);
8235 mlxsw_sp_rif_subport_put(rif);
8236 return err;
8237 }
8238
8239 static void
8240 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8241 {
8242 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8243 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8244 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8245 u16 vid = mlxsw_sp_port_vlan->vid;
8246
8247 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8248 return;
8249
8250 mlxsw_sp_port_vlan->fid = NULL;
8251 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8252 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8253 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8254 mlxsw_sp_fid_put(fid);
8255 mlxsw_sp_rif_subport_put(rif);
8256 }
8257
8258 int
8259 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8260 struct net_device *l3_dev,
8261 struct netlink_ext_ack *extack)
8262 {
8263 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8264 struct mlxsw_sp_rif *rif;
8265 int err = 0;
8266
8267 mutex_lock(&mlxsw_sp->router->lock);
8268 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8269 if (!rif)
8270 goto out;
8271
8272 err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8273 extack);
8274 out:
8275 mutex_unlock(&mlxsw_sp->router->lock);
8276 return err;
8277 }
8278
8279 void
8280 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8281 {
8282 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8283
8284 mutex_lock(&mlxsw_sp->router->lock);
8285 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8286 mutex_unlock(&mlxsw_sp->router->lock);
8287 }
8288
8289 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8290 struct net_device *port_dev,
8291 unsigned long event, u16 vid,
8292 struct netlink_ext_ack *extack)
8293 {
8294 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8295 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8296
8297 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8298 if (WARN_ON(!mlxsw_sp_port_vlan))
8299 return -EINVAL;
8300
8301 switch (event) {
8302 case NETDEV_UP:
8303 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8304 l3_dev, extack);
8305 case NETDEV_DOWN:
8306 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8307 break;
8308 }
8309
8310 return 0;
8311 }
8312
8313 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8314 unsigned long event,
8315 struct netlink_ext_ack *extack)
8316 {
8317 if (netif_is_bridge_port(port_dev) ||
8318 netif_is_lag_port(port_dev) ||
8319 netif_is_ovs_port(port_dev))
8320 return 0;
8321
8322 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8323 MLXSW_SP_DEFAULT_VID, extack);
8324 }
8325
8326 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8327 struct net_device *lag_dev,
8328 unsigned long event, u16 vid,
8329 struct netlink_ext_ack *extack)
8330 {
8331 struct net_device *port_dev;
8332 struct list_head *iter;
8333 int err;
8334
8335 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8336 if (mlxsw_sp_port_dev_check(port_dev)) {
8337 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8338 port_dev,
8339 event, vid,
8340 extack);
8341 if (err)
8342 return err;
8343 }
8344 }
8345
8346 return 0;
8347 }
8348
8349 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8350 unsigned long event,
8351 struct netlink_ext_ack *extack)
8352 {
8353 if (netif_is_bridge_port(lag_dev))
8354 return 0;
8355
8356 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8357 MLXSW_SP_DEFAULT_VID, extack);
8358 }
8359
8360 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8361 struct net_device *l3_dev,
8362 unsigned long event,
8363 struct netlink_ext_ack *extack)
8364 {
8365 struct mlxsw_sp_rif_params params = {
8366 .dev = l3_dev,
8367 };
8368 struct mlxsw_sp_rif *rif;
8369
8370 switch (event) {
8371 case NETDEV_UP:
8372 if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8373 u16 proto;
8374
8375 br_vlan_get_proto(l3_dev, &proto);
8376 if (proto == ETH_P_8021AD) {
8377 NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8378 return -EOPNOTSUPP;
8379 }
8380 }
8381 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8382 if (IS_ERR(rif))
8383 return PTR_ERR(rif);
8384 break;
8385 case NETDEV_DOWN:
8386 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8387 mlxsw_sp_rif_destroy(rif);
8388 break;
8389 }
8390
8391 return 0;
8392 }
8393
8394 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8395 struct net_device *vlan_dev,
8396 unsigned long event,
8397 struct netlink_ext_ack *extack)
8398 {
8399 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8400 u16 vid = vlan_dev_vlan_id(vlan_dev);
8401
8402 if (netif_is_bridge_port(vlan_dev))
8403 return 0;
8404
8405 if (mlxsw_sp_port_dev_check(real_dev))
8406 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8407 event, vid, extack);
8408 else if (netif_is_lag_master(real_dev))
8409 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8410 vid, extack);
8411 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8412 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8413 extack);
8414
8415 return 0;
8416 }
8417
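/* VRRP uses well-known virtual router MAC addresses (RFC 5798):
 * 00-00-5E-00-01-{VRID} for IPv4 and 00-00-5E-00-02-{VRID} for IPv6.
 * Match on the first five octets only, so that any VRID is accepted.
 */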
8418 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8419 {
8420 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8421 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8422
8423 return ether_addr_equal_masked(mac, vrrp4, mask);
8424 }
8425
8426 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8427 {
8428 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8429 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8430
8431 return ether_addr_equal_masked(mac, vrrp6, mask);
8432 }
8433
8434 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8435 const u8 *mac, bool adding)
8436 {
8437 char ritr_pl[MLXSW_REG_RITR_LEN];
8438 u8 vrrp_id = adding ? mac[5] : 0;
8439 int err;
8440
8441 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8442 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8443 return 0;
8444
8445 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8446 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8447 if (err)
8448 return err;
8449
8450 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8451 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8452 else
8453 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8454
8455 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8456 }
8457
8458 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8459 const struct net_device *macvlan_dev,
8460 struct netlink_ext_ack *extack)
8461 {
8462 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8463 struct mlxsw_sp_rif *rif;
8464 int err;
8465
8466 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8467 if (!rif) {
8468 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8469 return -EOPNOTSUPP;
8470 }
8471
8472 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8473 mlxsw_sp_fid_index(rif->fid), true);
8474 if (err)
8475 return err;
8476
8477 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8478 macvlan_dev->dev_addr, true);
8479 if (err)
8480 goto err_rif_vrrp_add;
8481
8482 /* Make sure the bridge driver does not have this MAC pointing at
8483 * some other port.
8484 */
8485 if (rif->ops->fdb_del)
8486 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8487
8488 return 0;
8489
8490 err_rif_vrrp_add:
8491 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8492 mlxsw_sp_fid_index(rif->fid), false);
8493 return err;
8494 }
8495
8496 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8497 const struct net_device *macvlan_dev)
8498 {
8499 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8500 struct mlxsw_sp_rif *rif;
8501
8502 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8503 /* If we do not have a RIF, then we already took care of
8504 * removing the macvlan's MAC during RIF deletion.
8505 */
8506 if (!rif)
8507 return;
8508 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8509 false);
8510 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8511 mlxsw_sp_fid_index(rif->fid), false);
8512 }
8513
8514 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8515 const struct net_device *macvlan_dev)
8516 {
8517 mutex_lock(&mlxsw_sp->router->lock);
8518 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8519 mutex_unlock(&mlxsw_sp->router->lock);
8520 }
8521
8522 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8523 struct net_device *macvlan_dev,
8524 unsigned long event,
8525 struct netlink_ext_ack *extack)
8526 {
8527 switch (event) {
8528 case NETDEV_UP:
8529 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8530 case NETDEV_DOWN:
8531 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8532 break;
8533 }
8534
8535 return 0;
8536 }
8537
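/* The device requires all router interface MAC addresses to share the
 * same prefix; only the bits covered by mac_mask may differ between
 * RIFs. Reject an address that violates this before it is installed.
 */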
8538 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
8539 struct net_device *dev,
8540 const unsigned char *dev_addr,
8541 struct netlink_ext_ack *extack)
8542 {
8543 struct mlxsw_sp_rif *rif;
8544 int i;
8545
8546 /* A RIF is not created for macvlan netdevs. Their MAC is used to
8547 	 * populate the FDB.
8548 */
8549 if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
8550 return 0;
8551
8552 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8553 rif = mlxsw_sp->router->rifs[i];
8554 if (rif && rif->ops &&
8555 rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
8556 continue;
8557 if (rif && rif->dev && rif->dev != dev &&
8558 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
8559 mlxsw_sp->mac_mask)) {
8560 NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
8561 return -EINVAL;
8562 }
8563 }
8564
8565 return 0;
8566 }
8567
8568 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8569 struct net_device *dev,
8570 unsigned long event,
8571 struct netlink_ext_ack *extack)
8572 {
8573 if (mlxsw_sp_port_dev_check(dev))
8574 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8575 else if (netif_is_lag_master(dev))
8576 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8577 else if (netif_is_bridge_master(dev))
8578 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8579 extack);
8580 else if (is_vlan_dev(dev))
8581 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8582 extack);
8583 else if (netif_is_macvlan(dev))
8584 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8585 extack);
8586 else
8587 return 0;
8588 }
8589
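/* Notifier for IPv4 address deletion. Address addition is handled by
 * the validator notifier below, which runs before the address is
 * installed and can therefore veto it with an extack message.
 */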
8590 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8591 unsigned long event, void *ptr)
8592 {
8593 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8594 struct net_device *dev = ifa->ifa_dev->dev;
8595 struct mlxsw_sp_router *router;
8596 struct mlxsw_sp_rif *rif;
8597 int err = 0;
8598
8599 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8600 if (event == NETDEV_UP)
8601 return NOTIFY_DONE;
8602
8603 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8604 mutex_lock(&router->lock);
8605 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8606 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8607 goto out;
8608
8609 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8610 out:
8611 mutex_unlock(&router->lock);
8612 return notifier_from_errno(err);
8613 }
8614
8615 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8616 unsigned long event, void *ptr)
8617 {
8618 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8619 struct net_device *dev = ivi->ivi_dev->dev;
8620 struct mlxsw_sp *mlxsw_sp;
8621 struct mlxsw_sp_rif *rif;
8622 int err = 0;
8623
8624 mlxsw_sp = mlxsw_sp_lower_get(dev);
8625 if (!mlxsw_sp)
8626 return NOTIFY_DONE;
8627
8628 mutex_lock(&mlxsw_sp->router->lock);
8629 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8630 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8631 goto out;
8632
8633 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8634 ivi->extack);
8635 if (err)
8636 goto out;
8637
8638 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8639 out:
8640 mutex_unlock(&mlxsw_sp->router->lock);
8641 return notifier_from_errno(err);
8642 }
8643
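/* IPv6 address notifications are delivered in atomic context, so the
 * event is deferred to a work item that may take RTNL and the router
 * lock.
 */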
8644 struct mlxsw_sp_inet6addr_event_work {
8645 struct work_struct work;
8646 struct mlxsw_sp *mlxsw_sp;
8647 struct net_device *dev;
8648 unsigned long event;
8649 };
8650
8651 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8652 {
8653 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8654 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8655 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8656 struct net_device *dev = inet6addr_work->dev;
8657 unsigned long event = inet6addr_work->event;
8658 struct mlxsw_sp_rif *rif;
8659
8660 rtnl_lock();
8661 mutex_lock(&mlxsw_sp->router->lock);
8662
8663 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8664 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8665 goto out;
8666
8667 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8668 out:
8669 mutex_unlock(&mlxsw_sp->router->lock);
8670 rtnl_unlock();
8671 dev_put(dev);
8672 kfree(inet6addr_work);
8673 }
8674
8675 /* Called with rcu_read_lock() */
8676 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8677 unsigned long event, void *ptr)
8678 {
8679 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8680 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8681 struct net_device *dev = if6->idev->dev;
8682 struct mlxsw_sp_router *router;
8683
8684 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8685 if (event == NETDEV_UP)
8686 return NOTIFY_DONE;
8687
8688 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8689 if (!inet6addr_work)
8690 return NOTIFY_BAD;
8691
8692 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8693 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8694 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8695 inet6addr_work->dev = dev;
8696 inet6addr_work->event = event;
8697 dev_hold(dev);
8698 mlxsw_core_schedule_work(&inet6addr_work->work);
8699
8700 return NOTIFY_DONE;
8701 }
8702
8703 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
8704 unsigned long event, void *ptr)
8705 {
8706 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
8707 struct net_device *dev = i6vi->i6vi_dev->dev;
8708 struct mlxsw_sp *mlxsw_sp;
8709 struct mlxsw_sp_rif *rif;
8710 int err = 0;
8711
8712 mlxsw_sp = mlxsw_sp_lower_get(dev);
8713 if (!mlxsw_sp)
8714 return NOTIFY_DONE;
8715
8716 mutex_lock(&mlxsw_sp->router->lock);
8717 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8718 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8719 goto out;
8720
8721 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8722 i6vi->extack);
8723 if (err)
8724 goto out;
8725
8726 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
8727 out:
8728 mutex_unlock(&mlxsw_sp->router->lock);
8729 return notifier_from_errno(err);
8730 }
8731
8732 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8733 const char *mac, int mtu)
8734 {
8735 char ritr_pl[MLXSW_REG_RITR_LEN];
8736 int err;
8737
8738 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8739 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8740 if (err)
8741 return err;
8742
8743 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
8744 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
8745 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
8746 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8747 }
8748
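/* Sync the RIF with its netdev after an MTU or MAC address change:
 * retire the FDB entry that directed the old MAC to the router, edit
 * the RITR entry with the new MAC and MTU, install an FDB entry for
 * the new MAC and, on MTU change, update the multicast routing tables.
 */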
8749 static int
8750 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
8751 struct mlxsw_sp_rif *rif)
8752 {
8753 struct net_device *dev = rif->dev;
8754 u16 fid_index;
8755 int err;
8756
8757 fid_index = mlxsw_sp_fid_index(rif->fid);
8758
8759 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
8760 if (err)
8761 return err;
8762
8763 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
8764 dev->mtu);
8765 if (err)
8766 goto err_rif_edit;
8767
8768 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
8769 if (err)
8770 goto err_rif_fdb_op;
8771
8772 if (rif->mtu != dev->mtu) {
8773 struct mlxsw_sp_vr *vr;
8774 int i;
8775
8776 /* The RIF is relevant only to its mr_table instance, as unlike
8777 * unicast routing, in multicast routing a RIF cannot be shared
8778 * between several multicast routing tables.
8779 */
8780 vr = &mlxsw_sp->router->vrs[rif->vr_id];
8781 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8782 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
8783 rif, dev->mtu);
8784 }
8785
8786 ether_addr_copy(rif->addr, dev->dev_addr);
8787 rif->mtu = dev->mtu;
8788
8789 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
8790
8791 return 0;
8792
8793 err_rif_fdb_op:
8794 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
8795 err_rif_edit:
8796 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
8797 return err;
8798 }
8799
8800 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
8801 struct netdev_notifier_pre_changeaddr_info *info)
8802 {
8803 struct netlink_ext_ack *extack;
8804
8805 extack = netdev_notifier_info_to_extack(&info->info);
8806 return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
8807 info->dev_addr, extack);
8808 }
8809
8810 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
8811 unsigned long event, void *ptr)
8812 {
8813 struct mlxsw_sp *mlxsw_sp;
8814 struct mlxsw_sp_rif *rif;
8815 int err = 0;
8816
8817 mlxsw_sp = mlxsw_sp_lower_get(dev);
8818 if (!mlxsw_sp)
8819 return 0;
8820
8821 mutex_lock(&mlxsw_sp->router->lock);
8822 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8823 if (!rif)
8824 goto out;
8825
8826 switch (event) {
8827 case NETDEV_CHANGEMTU:
8828 case NETDEV_CHANGEADDR:
8829 err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
8830 break;
8831 case NETDEV_PRE_CHANGEADDR:
8832 err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
8833 break;
8834 }
8835
8836 out:
8837 mutex_unlock(&mlxsw_sp->router->lock);
8838 return err;
8839 }
8840
8841 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
8842 struct net_device *l3_dev,
8843 struct netlink_ext_ack *extack)
8844 {
8845 struct mlxsw_sp_rif *rif;
8846
8847 /* If netdev is already associated with a RIF, then we need to
8848 * destroy it and create a new one with the new virtual router ID.
8849 */
8850 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8851 if (rif)
8852 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
8853 extack);
8854
8855 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
8856 }
8857
8858 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
8859 struct net_device *l3_dev)
8860 {
8861 struct mlxsw_sp_rif *rif;
8862
8863 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8864 if (!rif)
8865 return;
8866 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
8867 }
8868
8869 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
8870 struct netdev_notifier_changeupper_info *info)
8871 {
8872 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
8873 int err = 0;
8874
8875 /* We do not create a RIF for a macvlan, but only use it to
8876 * direct more MAC addresses to the router.
8877 */
8878 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
8879 return 0;
8880
8881 mutex_lock(&mlxsw_sp->router->lock);
8882 switch (event) {
8883 case NETDEV_PRECHANGEUPPER:
8884 break;
8885 case NETDEV_CHANGEUPPER:
8886 if (info->linking) {
8887 struct netlink_ext_ack *extack;
8888
8889 extack = netdev_notifier_info_to_extack(&info->info);
8890 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
8891 } else {
8892 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
8893 }
8894 break;
8895 }
8896 mutex_unlock(&mlxsw_sp->router->lock);
8897
8898 return err;
8899 }
8900
8901 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
8902 struct netdev_nested_priv *priv)
8903 {
8904 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
8905
8906 if (!netif_is_macvlan(dev))
8907 return 0;
8908
8909 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
8910 mlxsw_sp_fid_index(rif->fid), false);
8911 }
8912
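/* Macvlan MACs are directed to the router through FDB entries on the
 * lower device's RIF. When the RIF is destroyed, walk all macvlan
 * uppers and remove their entries, since nothing directs them to the
 * router anymore.
 */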
8913 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
8914 {
8915 struct netdev_nested_priv priv = {
8916 .data = (void *)rif,
8917 };
8918
8919 if (!netif_is_macvlan_port(rif->dev))
8920 return 0;
8921
8922 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
8923 return netdev_walk_all_upper_dev_rcu(rif->dev,
8924 __mlxsw_sp_rif_macvlan_flush, &priv);
8925 }
8926
8927 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
8928 const struct mlxsw_sp_rif_params *params)
8929 {
8930 struct mlxsw_sp_rif_subport *rif_subport;
8931
8932 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8933 refcount_set(&rif_subport->ref_count, 1);
8934 rif_subport->vid = params->vid;
8935 rif_subport->lag = params->lag;
8936 if (params->lag)
8937 rif_subport->lag_id = params->lag_id;
8938 else
8939 rif_subport->system_port = params->system_port;
8940 }
8941
8942 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
8943 {
8944 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8945 struct mlxsw_sp_rif_subport *rif_subport;
8946 char ritr_pl[MLXSW_REG_RITR_LEN];
8947
8948 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8949 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
8950 rif->rif_index, rif->vr_id, rif->dev->mtu);
8951 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
8952 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
8953 rif_subport->lag ? rif_subport->lag_id :
8954 rif_subport->system_port,
8955 rif_subport->vid);
8956
8957 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8958 }
8959
8960 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
8961 {
8962 int err;
8963
8964 err = mlxsw_sp_rif_subport_op(rif, true);
8965 if (err)
8966 return err;
8967
8968 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
8969 mlxsw_sp_fid_index(rif->fid), true);
8970 if (err)
8971 goto err_rif_fdb_op;
8972
8973 mlxsw_sp_fid_rif_set(rif->fid, rif);
8974 return 0;
8975
8976 err_rif_fdb_op:
8977 mlxsw_sp_rif_subport_op(rif, false);
8978 return err;
8979 }
8980
8981 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
8982 {
8983 struct mlxsw_sp_fid *fid = rif->fid;
8984
8985 mlxsw_sp_fid_rif_set(fid, NULL);
8986 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
8987 mlxsw_sp_fid_index(fid), false);
8988 mlxsw_sp_rif_macvlan_flush(rif);
8989 mlxsw_sp_rif_subport_op(rif, false);
8990 }
8991
8992 static struct mlxsw_sp_fid *
8993 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
8994 struct netlink_ext_ack *extack)
8995 {
8996 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
8997 }
8998
8999 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9000 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
9001 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
9002 .setup = mlxsw_sp_rif_subport_setup,
9003 .configure = mlxsw_sp_rif_subport_configure,
9004 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
9005 .fid_get = mlxsw_sp_rif_subport_fid_get,
9006 };
9007
9008 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
9009 enum mlxsw_reg_ritr_if_type type,
9010 u16 vid_fid, bool enable)
9011 {
9012 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9013 char ritr_pl[MLXSW_REG_RITR_LEN];
9014
9015 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9016 rif->dev->mtu);
9017 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9018 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
9019
9020 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9021 }
9022
9023 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9024 {
9025 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9026 }
9027
9028 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
9029 {
9030 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9031 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9032 int err;
9033
9034 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
9035 true);
9036 if (err)
9037 return err;
9038
9039 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9040 mlxsw_sp_router_port(mlxsw_sp), true);
9041 if (err)
9042 goto err_fid_mc_flood_set;
9043
9044 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9045 mlxsw_sp_router_port(mlxsw_sp), true);
9046 if (err)
9047 goto err_fid_bc_flood_set;
9048
9049 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9050 mlxsw_sp_fid_index(rif->fid), true);
9051 if (err)
9052 goto err_rif_fdb_op;
9053
9054 mlxsw_sp_fid_rif_set(rif->fid, rif);
9055 return 0;
9056
9057 err_rif_fdb_op:
9058 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9059 mlxsw_sp_router_port(mlxsw_sp), false);
9060 err_fid_bc_flood_set:
9061 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9062 mlxsw_sp_router_port(mlxsw_sp), false);
9063 err_fid_mc_flood_set:
9064 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9065 return err;
9066 }
9067
9068 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9069 {
9070 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9071 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9072 struct mlxsw_sp_fid *fid = rif->fid;
9073
9074 mlxsw_sp_fid_rif_set(fid, NULL);
9075 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9076 mlxsw_sp_fid_index(fid), false);
9077 mlxsw_sp_rif_macvlan_flush(rif);
9078 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9079 mlxsw_sp_router_port(mlxsw_sp), false);
9080 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9081 mlxsw_sp_router_port(mlxsw_sp), false);
9082 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9083 }
9084
9085 static struct mlxsw_sp_fid *
9086 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9087 struct netlink_ext_ack *extack)
9088 {
9089 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
9090 }
9091
9092 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9093 {
9094 struct switchdev_notifier_fdb_info info = {};
9095 struct net_device *dev;
9096
9097 dev = br_fdb_find_port(rif->dev, mac, 0);
9098 if (!dev)
9099 return;
9100
9101 info.addr = mac;
9102 info.vid = 0;
9103 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9104 NULL);
9105 }
9106
9107 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9108 .type = MLXSW_SP_RIF_TYPE_FID,
9109 .rif_size = sizeof(struct mlxsw_sp_rif),
9110 .configure = mlxsw_sp_rif_fid_configure,
9111 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
9112 .fid_get = mlxsw_sp_rif_fid_fid_get,
9113 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
9114 };
9115
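/* Resolve the 802.1Q FID backing a VLAN RIF: for a VLAN upper of a
 * bridge use the VLAN device's VID, while for a VLAN-aware bridge
 * itself use the bridge's PVID, without which the RIF cannot be
 * created.
 */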
9116 static struct mlxsw_sp_fid *
9117 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9118 struct netlink_ext_ack *extack)
9119 {
9120 struct net_device *br_dev;
9121 u16 vid;
9122 int err;
9123
9124 if (is_vlan_dev(rif->dev)) {
9125 vid = vlan_dev_vlan_id(rif->dev);
9126 br_dev = vlan_dev_real_dev(rif->dev);
9127 if (WARN_ON(!netif_is_bridge_master(br_dev)))
9128 return ERR_PTR(-EINVAL);
9129 } else {
9130 err = br_vlan_get_pvid(rif->dev, &vid);
9131 if (err < 0 || !vid) {
9132 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9133 return ERR_PTR(-EINVAL);
9134 }
9135 }
9136
9137 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9138 }
9139
9140 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9141 {
9142 struct switchdev_notifier_fdb_info info = {};
9143 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9144 struct net_device *br_dev;
9145 struct net_device *dev;
9146
9147 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9148 dev = br_fdb_find_port(br_dev, mac, vid);
9149 if (!dev)
9150 return;
9151
9152 info.addr = mac;
9153 info.vid = vid;
9154 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9155 NULL);
9156 }
9157
9158 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
9159 .type = MLXSW_SP_RIF_TYPE_VLAN,
9160 .rif_size = sizeof(struct mlxsw_sp_rif),
9161 .configure = mlxsw_sp_rif_fid_configure,
9162 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
9163 .fid_get = mlxsw_sp_rif_vlan_fid_get,
9164 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
9165 };
9166
9167 static struct mlxsw_sp_rif_ipip_lb *
9168 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
9169 {
9170 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
9171 }
9172
9173 static void
9174 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9175 const struct mlxsw_sp_rif_params *params)
9176 {
9177 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9178 struct mlxsw_sp_rif_ipip_lb *rif_lb;
9179
9180 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9181 common);
9182 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9183 rif_lb->lb_config = params_lb->lb_config;
9184 }
9185
9186 static int
9187 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
9188 {
9189 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9190 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9191 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9192 struct mlxsw_sp_vr *ul_vr;
9193 int err;
9194
9195 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
9196 if (IS_ERR(ul_vr))
9197 return PTR_ERR(ul_vr);
9198
9199 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
9200 if (err)
9201 goto err_loopback_op;
9202
9203 lb_rif->ul_vr_id = ul_vr->id;
9204 lb_rif->ul_rif_id = 0;
9205 ++ul_vr->rif_count;
9206 return 0;
9207
9208 err_loopback_op:
9209 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9210 return err;
9211 }
9212
9213 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9214 {
9215 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9216 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9217 struct mlxsw_sp_vr *ul_vr;
9218
9219 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
9220 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
9221
9222 --ul_vr->rif_count;
9223 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9224 }
9225
9226 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
9227 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
9228 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
9229 .setup = mlxsw_sp_rif_ipip_lb_setup,
9230 .configure = mlxsw_sp1_rif_ipip_lb_configure,
9231 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
9232 };
9233
9234 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
9235 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
9236 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
9237 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
9238 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
9239 };
9240
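/* On Spectrum-2 the underlay of IP-in-IP tunnels is modeled by a
 * generic loopback RIF ("underlay RIF") allocated per virtual router
 * and shared by all the tunnels using that underlay VRF, hence the
 * reference counting below.
 */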
9241 static int
9242 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
9243 {
9244 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9245 char ritr_pl[MLXSW_REG_RITR_LEN];
9246
9247 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
9248 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
9249 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
9250 MLXSW_REG_RITR_LOOPBACK_GENERIC);
9251
9252 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9253 }
9254
9255 static struct mlxsw_sp_rif *
9256 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
9257 struct netlink_ext_ack *extack)
9258 {
9259 struct mlxsw_sp_rif *ul_rif;
9260 u16 rif_index;
9261 int err;
9262
9263 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
9264 if (err) {
9265 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
9266 return ERR_PTR(err);
9267 }
9268
9269 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
9270 if (!ul_rif)
9271 return ERR_PTR(-ENOMEM);
9272
9273 mlxsw_sp->router->rifs[rif_index] = ul_rif;
9274 ul_rif->mlxsw_sp = mlxsw_sp;
9275 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
9276 if (err)
9277 goto ul_rif_op_err;
9278
9279 return ul_rif;
9280
9281 ul_rif_op_err:
9282 mlxsw_sp->router->rifs[rif_index] = NULL;
9283 kfree(ul_rif);
9284 return ERR_PTR(err);
9285 }
9286
9287 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9288 {
9289 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9290
9291 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9292 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9293 kfree(ul_rif);
9294 }
9295
9296 static struct mlxsw_sp_rif *
9297 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
9298 struct netlink_ext_ack *extack)
9299 {
9300 struct mlxsw_sp_vr *vr;
9301 int err;
9302
9303 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
9304 if (IS_ERR(vr))
9305 return ERR_CAST(vr);
9306
9307 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
9308 return vr->ul_rif;
9309
9310 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
9311 if (IS_ERR(vr->ul_rif)) {
9312 err = PTR_ERR(vr->ul_rif);
9313 goto err_ul_rif_create;
9314 }
9315
9316 vr->rif_count++;
9317 refcount_set(&vr->ul_rif_refcnt, 1);
9318
9319 return vr->ul_rif;
9320
9321 err_ul_rif_create:
9322 mlxsw_sp_vr_put(mlxsw_sp, vr);
9323 return ERR_PTR(err);
9324 }
9325
9326 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
9327 {
9328 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9329 struct mlxsw_sp_vr *vr;
9330
9331 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
9332
9333 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
9334 return;
9335
9336 vr->rif_count--;
9337 mlxsw_sp_ul_rif_destroy(ul_rif);
9338 mlxsw_sp_vr_put(mlxsw_sp, vr);
9339 }
9340
9341 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
9342 u16 *ul_rif_index)
9343 {
9344 struct mlxsw_sp_rif *ul_rif;
9345 int err = 0;
9346
9347 mutex_lock(&mlxsw_sp->router->lock);
9348 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9349 if (IS_ERR(ul_rif)) {
9350 err = PTR_ERR(ul_rif);
9351 goto out;
9352 }
9353 *ul_rif_index = ul_rif->rif_index;
9354 out:
9355 mutex_unlock(&mlxsw_sp->router->lock);
9356 return err;
9357 }
9358
9359 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
9360 {
9361 struct mlxsw_sp_rif *ul_rif;
9362
9363 mutex_lock(&mlxsw_sp->router->lock);
9364 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
9365 if (WARN_ON(!ul_rif))
9366 goto out;
9367
9368 mlxsw_sp_ul_rif_put(ul_rif);
9369 out:
9370 mutex_unlock(&mlxsw_sp->router->lock);
9371 }
9372
9373 static int
9374 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
9375 {
9376 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9377 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9378 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9379 struct mlxsw_sp_rif *ul_rif;
9380 int err;
9381
9382 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9383 if (IS_ERR(ul_rif))
9384 return PTR_ERR(ul_rif);
9385
9386 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
9387 if (err)
9388 goto err_loopback_op;
9389
9390 lb_rif->ul_vr_id = 0;
9391 lb_rif->ul_rif_id = ul_rif->rif_index;
9392
9393 return 0;
9394
9395 err_loopback_op:
9396 mlxsw_sp_ul_rif_put(ul_rif);
9397 return err;
9398 }
9399
9400 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9401 {
9402 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9403 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9404 struct mlxsw_sp_rif *ul_rif;
9405
9406 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
9407 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
9408 mlxsw_sp_ul_rif_put(ul_rif);
9409 }
9410
9411 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
9412 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
9413 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
9414 .setup = mlxsw_sp_rif_ipip_lb_setup,
9415 .configure = mlxsw_sp2_rif_ipip_lb_configure,
9416 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
9417 };
9418
9419 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
9420 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
9421 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
9422 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
9423 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
9424 };
9425
9426 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
9427 {
9428 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
9429
9430 mlxsw_sp->router->rifs = kcalloc(max_rifs,
9431 sizeof(struct mlxsw_sp_rif *),
9432 GFP_KERNEL);
9433 if (!mlxsw_sp->router->rifs)
9434 return -ENOMEM;
9435
9436 return 0;
9437 }
9438
9439 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
9440 {
9441 int i;
9442
9443 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
9444 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
9445
9446 kfree(mlxsw_sp->router->rifs);
9447 }
9448
9449 static int
9450 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
9451 {
9452 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
9453
9454 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
9455 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
9456 }
9457
9458 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
9459 {
9460 int err;
9461
9462 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
9463 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
9464
9465 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
9466 if (err)
9467 return err;
9468 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
9469 if (err)
9470 return err;
9471
9472 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
9473 }
9474
9475 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
9476 {
9477 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
9478 }
9479
9480 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
9481 {
9482 struct mlxsw_sp_router *router;
9483
9484 /* Flush pending FIB notifications and then flush the device's
9485 * table before requesting another dump. The FIB notification
9486 * block is unregistered, so no need to take RTNL.
9487 */
9488 mlxsw_core_flush_owq();
9489 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
9490 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
9491 }
9492
9493 #ifdef CONFIG_IP_ROUTE_MULTIPATH
9494 struct mlxsw_sp_mp_hash_config {
9495 DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
9496 DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
9497 DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
9498 DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
9499 bool inc_parsing_depth;
9500 };
9501
9502 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
9503 bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
9504
9505 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
9506 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
9507
9508 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
9509 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
9510
9511 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
9512 {
9513 unsigned long *inner_headers = config->inner_headers;
9514 unsigned long *inner_fields = config->inner_fields;
9515
9516 /* IPv4 inner */
9517 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9518 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9519 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9520 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9521 /* IPv6 inner */
9522 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9523 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9524 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9525 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9526 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9527 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9528 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9529 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9530 }
9531
9532 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
9533 {
9534 unsigned long *headers = config->headers;
9535 unsigned long *fields = config->fields;
9536
9537 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9538 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9539 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9540 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9541 }
9542
9543 static void
9544 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
9545 u32 hash_fields)
9546 {
9547 unsigned long *inner_headers = config->inner_headers;
9548 unsigned long *inner_fields = config->inner_fields;
9549
9550 /* IPv4 Inner */
9551 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9552 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9553 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
9554 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9555 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
9556 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9557 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9558 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
9559 /* IPv6 inner */
9560 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9561 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9562 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
9563 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9564 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9565 }
9566 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
9567 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9568 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9569 }
9570 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9571 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9572 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
9573 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9574 /* L4 inner */
9575 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
9576 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
9577 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
9578 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
9579 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
9580 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
9581 }
9582
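/* Mirror the kernel's fib_multipath_hash_policy sysctl: 0 - L3,
 * 1 - L4 (5-tuple), 2 - L3 or inner L3 if present, 3 - custom set of
 * fields taken from fib_multipath_hash_fields.
 */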
9583 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
9584 struct mlxsw_sp_mp_hash_config *config)
9585 {
9586 struct net *net = mlxsw_sp_net(mlxsw_sp);
9587 unsigned long *headers = config->headers;
9588 unsigned long *fields = config->fields;
9589 u32 hash_fields;
9590
9591 switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
9592 case 0:
9593 mlxsw_sp_mp4_hash_outer_addr(config);
9594 break;
9595 case 1:
9596 mlxsw_sp_mp4_hash_outer_addr(config);
9597 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9598 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9599 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9600 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9601 break;
9602 case 2:
9603 /* Outer */
9604 mlxsw_sp_mp4_hash_outer_addr(config);
9605 /* Inner */
9606 mlxsw_sp_mp_hash_inner_l3(config);
9607 break;
9608 case 3:
9609 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
9610 /* Outer */
9611 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9612 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9613 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9614 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
9615 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9616 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
9617 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9618 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
9619 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9620 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
9621 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9622 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
9623 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9624 /* Inner */
9625 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
9626 break;
9627 }
9628 }
9629
9630 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
9631 {
9632 unsigned long *headers = config->headers;
9633 unsigned long *fields = config->fields;
9634
9635 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
9636 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
9637 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
9638 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
9639 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
9640 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
9641 }
9642
9643 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
9644 struct mlxsw_sp_mp_hash_config *config)
9645 {
9646 u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
9647 unsigned long *headers = config->headers;
9648 unsigned long *fields = config->fields;
9649
9650 switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
9651 case 0:
9652 mlxsw_sp_mp6_hash_outer_addr(config);
9653 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9654 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
9655 break;
9656 case 1:
9657 mlxsw_sp_mp6_hash_outer_addr(config);
9658 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
9659 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9660 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9661 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9662 break;
9663 case 2:
9664 /* Outer */
9665 mlxsw_sp_mp6_hash_outer_addr(config);
9666 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9667 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
9668 /* Inner */
9669 mlxsw_sp_mp_hash_inner_l3(config);
9670 config->inc_parsing_depth = true;
9671 break;
9672 case 3:
9673 /* Outer */
9674 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
9675 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
9676 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
9677 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
9678 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
9679 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
9680 }
9681 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
9682 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
9683 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
9684 }
9685 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
9686 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9687 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
9688 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
9689 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
9690 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9691 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
9692 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9693 /* Inner */
9694 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
9695 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
9696 config->inc_parsing_depth = true;
9697 break;
9698 }
9699 }
9700
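/* Hashing on inner (encapsulated) headers requires the ASIC to parse
 * deeper into the packet than its default parsing depth, which is why
 * cases 2 and 3 above may set config->inc_parsing_depth. An illustrative
 * configuration that would trigger this (the mask value is hypothetical;
 * its bits come from the FIB_MULTIPATH_HASH_FIELD_INNER_* flags, here the
 * inner source and destination IPs):
 *
 *   sysctl -w net.ipv6.fib_multipath_hash_policy=3
 *   sysctl -w net.ipv6.fib_multipath_hash_fields=0x00c0
 */
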
static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
						 bool old_inc_parsing_depth,
						 bool new_inc_parsing_depth)
{
	int err;

	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
		if (err)
			return err;
		mlxsw_sp->router->inc_parsing_depth = true;
	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
		mlxsw_sp->router->inc_parsing_depth = false;
	}

	return 0;
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth, new_inc_parsing_depth;
	struct mlxsw_sp_mp_hash_config config = {};
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
	new_inc_parsing_depth = config.inc_parsing_depth;
	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
						    old_inc_parsing_depth,
						    new_inc_parsing_depth);
	if (err)
		return err;

	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
	if (err)
		goto err_reg_write;

	return 0;

err_reg_write:
	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
					      old_inc_parsing_depth);
	return err;
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

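/* Deriving the RECR2 hash seed from the switch's base MAC, rather than
 * using a fixed constant, gives each device in a multi-stage topology a
 * different hash function. This helps avoid hash polarization, where
 * successive switches would otherwise keep picking the "same" member of
 * their ECMP groups for any given flow.
 */
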
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* The HW determines switch priority from the DSCP bits alone, while
	 * the kernel derives it from the full ToS byte. To compensate for
	 * this bit offset, program each DSCP entry with the priority the
	 * kernel would assign to the corresponding ToS value, i.e. the DSCP
	 * value shifted past the two least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

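/* Worked example for the translation above: DSCP 46 (Expedited
 * Forwarding) occupies table index i = 46. Shifting past the two ECN bits
 * gives the ToS byte the kernel would see, 46 << 2 = 0xB8, so entry 46 is
 * programmed with rt_tos2priority(0xB8) -- the same priority the kernel's
 * ip_tos2prio lookup would produce for that ToS.
 */
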
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	bool usp;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

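/* The "usp" (update switch priority) flag mirrors the
 * net.ipv4.ip_forward_update_priority sysctl, so the ASIC rewrites packet
 * priority on forwarding only when the kernel would do the same. For
 * example (illustrative command, not part of this driver):
 *
 *   sysctl -w net.ipv4.ip_forward_update_priority=0
 */
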
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
	.init = mlxsw_sp_router_ll_basic_init,
	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
};

static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
{
	size_t max_size = 0;
	int i;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;

		if (size > max_size)
			max_size = size;
	}
	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
				    GFP_KERNEL);
	if (!router->ll_op_ctx)
		return -ENOMEM;
	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
	return 0;
}

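/* A single operation context is shared by both L3 protocols, so it is
 * allocated once, sized for the largest fib_entry_op_ctx_size any
 * registered low-level ops implementation declares. Either protocol's ops
 * can then use it without a per-operation allocation.
 */
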
static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
{
	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	kfree(router->ll_op_ctx);
}

static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
	u16 lb_rif_index;
	int err;

	/* Create a generic loopback RIF associated with the main table
	 * (default VRF). Any table can be used, but the main table exists
	 * anyway, so we do not waste resources.
	 */
	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
					 &lb_rif_index);
	if (err)
		return err;

	mlxsw_sp->router->lb_rif_index = lb_rif_index;

	return 0;
}

static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}

static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
};

static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
};

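/* The two exported ops structures above keep the common router code
 * generation-agnostic: the per-ASIC probe code is expected to point
 * mlxsw_sp->router_ops at the matching structure (Spectrum vs. Spectrum-2
 * and later) before mlxsw_sp_router_init() runs, which selects the RIF
 * ops array and the valid adjacency-group size ranges for that hardware.
 */
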
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp->router_ops->init(mlxsw_sp);
	if (err)
		goto err_router_ops_init;

	err = mlxsw_sp_router_xm_init(mlxsw_sp);
	if (err)
		goto err_xm_init;

	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
						       &mlxsw_sp_router_ll_xm_ops :
						       &mlxsw_sp_router_ll_basic_ops;
	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;

	err = mlxsw_sp_router_ll_op_ctx_init(router);
	if (err)
		goto err_ll_op_ctx_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
			  mlxsw_sp_nh_grp_activity_work);

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
	if (err)
		goto err_lb_rif_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
	INIT_LIST_HEAD(&router->fib_event_queue);
	spin_lock_init(&router->fib_event_queue_lock);

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
	mlxsw_sp_router_xm_fini(mlxsw_sp);
err_xm_init:
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

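/* Teardown below mirrors the error-unwind path of mlxsw_sp_router_init()
 * in reverse registration order: notifiers are removed first so no new
 * events can be queued, the ordered workqueue is flushed, and only then
 * are the router's data structures torn down.
 */
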
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
	mlxsw_sp_router_xm_fini(mlxsw_sp);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}
