• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/netdevice.h>
5 #include <linux/netlink.h>
6 #include <linux/random.h>
7 #include <net/vxlan.h>
8 
9 #include "reg.h"
10 #include "spectrum.h"
11 #include "spectrum_nve.h"
12 
13 #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS	(VXLAN_F_UDP_ZERO_CSUM_TX | \
14 						 VXLAN_F_LEARN)
15 
mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve * nve,const struct mlxsw_sp_nve_params * params,struct netlink_ext_ack * extack)16 static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
17 					   const struct mlxsw_sp_nve_params *params,
18 					   struct netlink_ext_ack *extack)
19 {
20 	struct vxlan_dev *vxlan = netdev_priv(params->dev);
21 	struct vxlan_config *cfg = &vxlan->cfg;
22 
23 	if (cfg->saddr.sa.sa_family != AF_INET) {
24 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
25 		return false;
26 	}
27 
28 	if (vxlan_addr_multicast(&cfg->remote_ip)) {
29 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
30 		return false;
31 	}
32 
33 	if (vxlan_addr_any(&cfg->saddr)) {
34 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
35 		return false;
36 	}
37 
38 	if (cfg->remote_ifindex) {
39 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
40 		return false;
41 	}
42 
43 	if (cfg->port_min || cfg->port_max) {
44 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
45 		return false;
46 	}
47 
48 	if (cfg->tos != 1) {
49 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
50 		return false;
51 	}
52 
53 	if (cfg->flags & VXLAN_F_TTL_INHERIT) {
54 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
55 		return false;
56 	}
57 
58 	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
59 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
60 		return false;
61 	}
62 
63 	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
64 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
65 		return false;
66 	}
67 
68 	if (cfg->ttl == 0) {
69 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
70 		return false;
71 	}
72 
73 	if (cfg->label != 0) {
74 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
75 		return false;
76 	}
77 
78 	return true;
79 }
80 
mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve * nve,const struct mlxsw_sp_nve_params * params,struct netlink_ext_ack * extack)81 static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
82 					    const struct mlxsw_sp_nve_params *params,
83 					    struct netlink_ext_ack *extack)
84 {
85 	if (params->ethertype == ETH_P_8021AD) {
86 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN");
87 		return false;
88 	}
89 
90 	return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
91 }
92 
mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve * nve,const struct mlxsw_sp_nve_params * params,struct mlxsw_sp_nve_config * config)93 static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
94 				      const struct mlxsw_sp_nve_params *params,
95 				      struct mlxsw_sp_nve_config *config)
96 {
97 	struct vxlan_dev *vxlan = netdev_priv(params->dev);
98 	struct vxlan_config *cfg = &vxlan->cfg;
99 
100 	config->type = MLXSW_SP_NVE_TYPE_VXLAN;
101 	config->ttl = cfg->ttl;
102 	config->flowlabel = cfg->label;
103 	config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
104 	config->ul_tb_id = RT_TABLE_MAIN;
105 	config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
106 	config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
107 	config->udp_dport = cfg->dst_port;
108 }
109 
110 static void
mlxsw_sp_nve_vxlan_config_prepare(char * tngcr_pl,const struct mlxsw_sp_nve_config * config)111 mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
112 				  const struct mlxsw_sp_nve_config *config)
113 {
114 	u8 udp_sport;
115 
116 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
117 			     config->ttl);
118 	/* VxLAN driver's default UDP source port range is 32768 (0x8000)
119 	 * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
120 	 * to a random number between 0x80 and 0xee
121 	 */
122 	get_random_bytes(&udp_sport, sizeof(udp_sport));
123 	udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
124 	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
125 	mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
126 }
127 
128 static int
mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp * mlxsw_sp,const struct mlxsw_sp_nve_config * config)129 mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
130 			       const struct mlxsw_sp_nve_config *config)
131 {
132 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
133 	u16 ul_vr_id;
134 	int err;
135 
136 	err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
137 					  &ul_vr_id);
138 	if (err)
139 		return err;
140 
141 	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
142 	mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
143 	mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
144 
145 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
146 }
147 
mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp * mlxsw_sp)148 static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
149 {
150 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
151 
152 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
153 
154 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
155 }
156 
mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp * mlxsw_sp,unsigned int tunnel_index)157 static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
158 					unsigned int tunnel_index)
159 {
160 	char rtdp_pl[MLXSW_REG_RTDP_LEN];
161 
162 	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
163 
164 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
165 }
166 
/* Bring up VxLAN offload on a Spectrum-1 device.
 *
 * Applies, in order: the VxLAN UDP destination port for the parser, an
 * increased parsing depth, the NVE general configuration (TNGCR), the
 * tunnel decap properties (RTDP) and finally promotion of the underlay
 * source IP to a decap route. On failure, previously applied steps are
 * rolled back in reverse order; mlxsw_sp1_nve_vxlan_fini() mirrors the
 * same teardown on the success path.
 */
static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
	if (err)
		goto err_parsing_depth_inc;

	err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	/* RTDP needs no explicit undo; clearing TNGCR disables the tunnel. */
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
err_parsing_depth_inc:
	/* A port of 0 restores the default parsing configuration. */
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
	return err;
}
207 
mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve * nve)208 static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
209 {
210 	struct mlxsw_sp_nve_config *config = &nve->config;
211 	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
212 
213 	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
214 					 config->ul_proto, &config->ul_sip);
215 	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
216 	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
217 	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
218 }
219 
220 static int
mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device * nve_dev,__be32 vni,struct netlink_ext_ack * extack)221 mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
222 			      struct netlink_ext_ack *extack)
223 {
224 	if (WARN_ON(!netif_is_vxlan(nve_dev)))
225 		return -EINVAL;
226 	return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
227 				extack);
228 }
229 
230 static void
mlxsw_sp_nve_vxlan_clear_offload(const struct net_device * nve_dev,__be32 vni)231 mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
232 {
233 	if (WARN_ON(!netif_is_vxlan(nve_dev)))
234 		return;
235 	vxlan_fdb_clear_offload(nve_dev, vni);
236 }
237 
/* VxLAN NVE operations for Spectrum-1 devices. */
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp1_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp1_nve_vxlan_init,
	.fini		= mlxsw_sp1_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
247 
/* Configure learning on the NVE tunnel port (Spectrum-2 TNPC register). */
static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
					    bool learning_en)
{
	char tnpc_pl[MLXSW_REG_TNPC_LEN];

	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE, learning_en);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
}
257 
258 static int
mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp * mlxsw_sp)259 mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp)
260 {
261 	char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
262 
263 	mlxsw_reg_spvid_tport_set(spvid_pl, true);
264 	mlxsw_reg_spvid_local_port_set(spvid_pl,
265 				       MLXSW_REG_TUNNEL_PORT_NVE);
266 	mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true);
267 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
268 }
269 
/* Apply the Spectrum-2 NVE configuration.
 *
 * Acquires the underlay RIF, sets tunnel-port learning, writes the NVE
 * general configuration (TNGCR), forces VLAN push on the tunnel port
 * (SPVTR) and configures the decap EtherType (SPVID). Each step that
 * fails unwinds all earlier steps in reverse order.
 */
static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
	u16 ul_rif_index;
	int err;

	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
					 &ul_rif_index);
	if (err)
		return err;
	/* Remembered so that config_clear() can put the RIF back. */
	mlxsw_sp->nve->ul_rif_index = ul_rif_index;

	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
	if (err)
		goto err_vxlan_learning_set;

	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	if (err)
		goto err_tngcr_write;

	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
	if (err)
		goto err_spvtr_write;

	err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp);
	if (err)
		goto err_decap_ethertype_set;

	return 0;

err_decap_ethertype_set:
	/* Restore the default (IEEE-compliant) ingress VLAN mode. */
	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
err_spvtr_write:
	/* Re-pack TNGCR with the enable bit off to disable the tunnel. */
	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
err_tngcr_write:
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return err;
}
321 
mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp * mlxsw_sp)322 static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
323 {
324 	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
325 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
326 
327 	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
328 			     MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
329 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
330 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
331 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
332 	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
333 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
334 }
335 
/* Set the tunnel decap properties; unlike Spectrum-1, the egress RIF
 * of decapsulated packets must also be programmed.
 */
static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index,
					u16 ul_rif_index)
{
	char rtdp_pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
	mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
347 
/* Bring up VxLAN offload on a Spectrum-2 device.
 *
 * Mirrors the Spectrum-1 init sequence: parser UDP destination port,
 * parsing depth, NVE configuration, decap properties (which here also
 * carry the underlay RIF) and promotion of the underlay source IP to a
 * decap route. Each failure path unwinds earlier steps in reverse
 * order; mlxsw_sp2_nve_vxlan_fini() performs the same teardown.
 */
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
	if (err)
		goto err_parsing_depth_inc;

	/* Also acquires the underlay RIF and stores its index in
	 * mlxsw_sp->nve->ul_rif_index for the RTDP write below.
	 */
	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
					   nve->ul_rif_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	/* RTDP needs no explicit undo; config_clear() disables the tunnel. */
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
err_parsing_depth_inc:
	/* A port of 0 restores the default parsing configuration. */
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
	return err;
}
389 
mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve * nve)390 static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
391 {
392 	struct mlxsw_sp_nve_config *config = &nve->config;
393 	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
394 
395 	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
396 					 config->ul_proto, &config->ul_sip);
397 	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
398 	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
399 	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
400 }
401 
/* VxLAN NVE operations for Spectrum-2 and later devices. */
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp2_nve_vxlan_init,
	.fini		= mlxsw_sp2_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
411