// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2021, Red Hat.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

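/* Return the remaining subflow socket if the msk has fallen back to plain
 * TCP, or NULL while the connection is still fully MPTCP. Caller must hold
 * the msk socket lock.
 */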
static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_check_fallback(msk)))
		return NULL;

	return msk->first;
}

static u32 sockopt_seq_reset(const struct sock *sk)
{
	sock_owned_by_me(sk);

	/* Highbits contain state.  Allows distinguishing the sockopt_seq
	 * of a listener and an established socket:
	 * s0 = new_listener()
	 * sockopt(s0) - seq is 1
	 * s1 = accept(s0) - s1 inherits seq 1 if listener sk (s0)
	 * sockopt(s0) - seq increments to 2 on s0
	 * sockopt(s1) // seq increments to 2 on s1 (different option)
	 * new ssk completes join, inherits options from s0 // seq 2
	 * Needs sync from mptcp join logic, but ssk->seq == msk->seq
	 *
	 * Set the high order bits to sk_state so the ssk->seq == msk->seq
	 * test will fail.
	 */

	return (u32)sk->sk_state << 24u;
}

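/* Bump the low 24 bits of the msk option generation counter; the high bits
 * always reflect the current sk_state via sockopt_seq_reset().
 */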
static void sockopt_seq_inc(struct mptcp_sock *msk)
{
	u32 seq = (msk->setsockopt_seq + 1) & 0x00ffffff;

	msk->setsockopt_seq = sockopt_seq_reset((struct sock *)msk) + seq;
}

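/* Copy an integer option value from the caller, rejecting short buffers. */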
static int mptcp_get_int_option(struct mptcp_sock *msk, sockptr_t optval,
				unsigned int optlen, int *val)
{
	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(val, optval, sizeof(*val)))
		return -EFAULT;

	return 0;
}

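/* Propagate an SOL_SOCKET integer option that is already set on the msk to
 * every subflow, bumping the option generation counter.
 */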
static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, int val)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	sockopt_seq_inc(msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		switch (optname) {
		case SO_DEBUG:
			sock_valbool_flag(ssk, SOCK_DBG, !!val);
			break;
		case SO_KEEPALIVE:
			if (ssk->sk_prot->keepalive)
				ssk->sk_prot->keepalive(ssk, !!val);
			sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val);
			break;
		case SO_PRIORITY:
			ssk->sk_priority = val;
			break;
		case SO_SNDBUF:
		case SO_SNDBUFFORCE:
			ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
			break;
		case SO_RCVBUF:
		case SO_RCVBUFFORCE:
			ssk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
			break;
		case SO_MARK:
			if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
				ssk->sk_mark = sk->sk_mark;
				sk_dst_reset(ssk);
			}
			break;
		case SO_INCOMING_CPU:
			WRITE_ONCE(ssk->sk_incoming_cpu, val);
			break;
		}

		subflow->setsockopt_seq = msk->setsockopt_seq;
		unlock_sock_fast(ssk, slow);
	}

	release_sock(sk);
}

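/* Set an integer SOL_SOCKET option on the msk itself, then mirror it on all
 * existing subflows.
 */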
static int mptcp_sol_socket_intval(struct mptcp_sock *msk, int optname, int val)
{
	sockptr_t optval = KERNEL_SOCKPTR(&val);
	struct sock *sk = (struct sock *)msk;
	int ret;

	ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
			      optval, sizeof(val));
	if (ret)
		return ret;

	mptcp_sol_socket_sync_intval(msk, optname, val);
	return 0;
}

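/* SO_INCOMING_CPU: store the value on the msk directly and mirror it on the
 * subflows.
 */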
static void mptcp_so_incoming_cpu(struct mptcp_sock *msk, int val)
{
	struct sock *sk = (struct sock *)msk;

	WRITE_ONCE(sk->sk_incoming_cpu, val);

	mptcp_sol_socket_sync_intval(msk, SO_INCOMING_CPU, val);
}

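/* Handle the SO_TIMESTAMP(NS)_OLD/NEW boolean options: set them on the msk
 * via sock_setsockopt(), then enable or disable them on each subflow.
 */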
static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optname, int val)
{
	sockptr_t optval = KERNEL_SOCKPTR(&val);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int ret;

	ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
			      optval, sizeof(val));
	if (ret)
		return ret;

	lock_sock(sk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		sock_set_timestamp(ssk, optname, !!val);
		unlock_sock_fast(ssk, slow);
	}

	release_sock(sk);
	return 0;
}

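/* Dispatch the SOL_SOCKET options that carry a plain integer value. */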
static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
					   sockptr_t optval,
					   unsigned int optlen)
{
	int val, ret;

	ret = mptcp_get_int_option(msk, optval, optlen, &val);
	if (ret)
		return ret;

	switch (optname) {
	case SO_KEEPALIVE:
		mptcp_sol_socket_sync_intval(msk, optname, val);
		return 0;
	case SO_DEBUG:
	case SO_MARK:
	case SO_PRIORITY:
	case SO_SNDBUF:
	case SO_SNDBUFFORCE:
	case SO_RCVBUF:
	case SO_RCVBUFFORCE:
		return mptcp_sol_socket_intval(msk, optname, val);
	case SO_INCOMING_CPU:
		mptcp_so_incoming_cpu(msk, val);
		return 0;
	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val);
	}

	return -ENOPROTOOPT;
}

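/* SO_TIMESTAMPING_OLD/NEW accept either a plain int or a struct
 * so_timestamping; normalize the input, apply it to the msk and then to
 * every subflow.
 */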
static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk,
						    int optname,
						    sockptr_t optval,
						    unsigned int optlen)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct so_timestamping timestamping;
	int ret;

	if (optlen == sizeof(timestamping)) {
		if (copy_from_sockptr(&timestamping, optval,
				      sizeof(timestamping)))
			return -EFAULT;
	} else if (optlen == sizeof(int)) {
		memset(&timestamping, 0, sizeof(timestamping));

		if (copy_from_sockptr(&timestamping.flags, optval, sizeof(int)))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
			      KERNEL_SOCKPTR(&timestamping),
			      sizeof(timestamping));
	if (ret)
		return ret;

	lock_sock(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		sock_set_timestamping(ssk, optname, timestamping);
		unlock_sock_fast(ssk, slow);
	}

	release_sock(sk);

	return 0;
}

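/* SO_LINGER: validate and apply the setting on the msk, then replicate the
 * resulting linger state on every subflow.
 */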
static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval,
					      unsigned int optlen)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct linger ling;
	sockptr_t kopt;
	int ret;

	if (optlen < sizeof(ling))
		return -EINVAL;

	if (copy_from_sockptr(&ling, optval, sizeof(ling)))
		return -EFAULT;

	kopt = KERNEL_SOCKPTR(&ling);
	ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, SO_LINGER, kopt, sizeof(ling));
	if (ret)
		return ret;

	lock_sock(sk);
	sockopt_seq_inc(msk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		if (!ling.l_onoff) {
			sock_reset_flag(ssk, SOCK_LINGER);
		} else {
			ssk->sk_lingertime = sk->sk_lingertime;
			sock_set_flag(ssk, SOCK_LINGER);
		}

		subflow->setsockopt_seq = msk->setsockopt_seq;
		unlock_sock_fast(ssk, slow);
	}

	release_sock(sk);
	return 0;
}

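/* Top-level SOL_SOCKET handler: a few options are applied to the first
 * subflow socket, most go through the integer/timestamping/linger helpers
 * above, and some msk-only options are set with sock_setsockopt() directly.
 */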
static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
				       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int ret;

	switch (optname) {
	case SO_REUSEPORT:
	case SO_REUSEADDR:
	case SO_BINDTODEVICE:
	case SO_BINDTOIFINDEX:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
		if (ret == 0) {
			if (optname == SO_REUSEPORT)
				sk->sk_reuseport = ssock->sk->sk_reuseport;
			else if (optname == SO_REUSEADDR)
				sk->sk_reuse = ssock->sk->sk_reuse;
			else if (optname == SO_BINDTODEVICE)
				sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
			else if (optname == SO_BINDTOIFINDEX)
				sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
		}
		release_sock(sk);
		return ret;
	case SO_KEEPALIVE:
	case SO_PRIORITY:
	case SO_SNDBUF:
	case SO_SNDBUFFORCE:
	case SO_RCVBUF:
	case SO_RCVBUFFORCE:
	case SO_MARK:
	case SO_INCOMING_CPU:
	case SO_DEBUG:
	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		return mptcp_setsockopt_sol_socket_int(msk, optname, optval,
						       optlen);
	case SO_TIMESTAMPING_OLD:
	case SO_TIMESTAMPING_NEW:
		return mptcp_setsockopt_sol_socket_timestamping(msk, optname,
								optval, optlen);
	case SO_LINGER:
		return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen);
	case SO_RCVLOWAT:
	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
	case SO_BUSY_POLL:
	case SO_PREFER_BUSY_POLL:
	case SO_BUSY_POLL_BUDGET:
		/* No need to copy: only relevant for msk */
		return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
	case SO_NO_CHECK:
	case SO_DONTROUTE:
	case SO_BROADCAST:
	case SO_BSDCOMPAT:
	case SO_PASSCRED:
	case SO_PASSSEC:
	case SO_RXQ_OVFL:
	case SO_WIFI_STATUS:
	case SO_NOFCS:
	case SO_SELECT_ERR_QUEUE:
		return 0;
	}

	/* SO_OOBINLINE is not supported, let's avoid the related mess
	 * SO_ATTACH_FILTER, SO_ATTACH_BPF, SO_ATTACH_REUSEPORT_CBPF,
	 * SO_DETACH_REUSEPORT_BPF, SO_DETACH_FILTER, SO_LOCK_FILTER,
	 * we must be careful with subflows
	 *
	 * SO_ATTACH_REUSEPORT_EBPF is not supported, as it checks
	 * the sk_protocol field explicitly
	 *
	 * SO_PEEK_OFF is unsupported, as it is for plain TCP
	 * SO_MAX_PACING_RATE is unsupported, we must be careful with subflows
	 * SO_CNX_ADVICE is currently unsupported, could possibly be relevant,
	 * but likely needs careful design
	 *
	 * SO_ZEROCOPY is currently unsupported, TODO in sndmsg
	 * SO_TXTIME is currently unsupported
	 */

	return -EOPNOTSUPP;
}

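/* IPV6_V6ONLY is applied to the first subflow socket, then mirrored on the
 * msk.
 */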
static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	switch (optname) {
	case IPV6_V6ONLY:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
		if (ret == 0)
			sk->sk_ipv6only = ssock->sk->sk_ipv6only;

		release_sock(sk);
		break;
	}

	return ret;
}

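/* Whitelist of the SOL_IP, SOL_IPV6 and SOL_TCP options that are known to
 * behave sanely on an MPTCP socket; everything else is rejected by
 * mptcp_setsockopt() with -ENOPROTOOPT.
 */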
static bool mptcp_supported_sockopt(int level, int optname)
{
	if (level == SOL_IP) {
		switch (optname) {
		/* should work fine */
		case IP_FREEBIND:
		case IP_TRANSPARENT:

		/* the following are control cmsg related */
		case IP_PKTINFO:
		case IP_RECVTTL:
		case IP_RECVTOS:
		case IP_RECVOPTS:
		case IP_RETOPTS:
		case IP_PASSSEC:
		case IP_RECVORIGDSTADDR:
		case IP_CHECKSUM:
		case IP_RECVFRAGSIZE:

		/* common stuff that needs some love */
		case IP_TOS:
		case IP_TTL:
		case IP_BIND_ADDRESS_NO_PORT:
		case IP_MTU_DISCOVER:
		case IP_RECVERR:

		/* possibly less common, may deserve some love */
		case IP_MINTTL:

		/* the following is apparently a no-op for plain TCP */
		case IP_RECVERR_RFC4884:
			return true;
		}

		/* IP_OPTIONS is not supported, needs subflow care */
		/* IP_HDRINCL, IP_NODEFRAG are not supported, RAW specific */
		/* IP_MULTICAST_TTL, IP_MULTICAST_LOOP, IP_UNICAST_IF,
		 * IP_ADD_MEMBERSHIP, IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_MEMBERSHIP,
		 * IP_DROP_SOURCE_MEMBERSHIP, IP_BLOCK_SOURCE, IP_UNBLOCK_SOURCE,
		 * MCAST_JOIN_GROUP, MCAST_LEAVE_GROUP, MCAST_JOIN_SOURCE_GROUP,
		 * MCAST_LEAVE_SOURCE_GROUP, MCAST_BLOCK_SOURCE, MCAST_UNBLOCK_SOURCE,
		 * MCAST_MSFILTER, IP_MULTICAST_ALL are not supported, better not deal
		 * with mcast stuff
		 */
		/* IP_IPSEC_POLICY, IP_XFRM_POLICY are not supported, unrelated here */
		return false;
	}
	if (level == SOL_IPV6) {
		switch (optname) {
		case IPV6_V6ONLY:

		/* the following are control cmsg related */
		case IPV6_RECVPKTINFO:
		case IPV6_2292PKTINFO:
		case IPV6_RECVHOPLIMIT:
		case IPV6_2292HOPLIMIT:
		case IPV6_RECVRTHDR:
		case IPV6_2292RTHDR:
		case IPV6_RECVHOPOPTS:
		case IPV6_2292HOPOPTS:
		case IPV6_RECVDSTOPTS:
		case IPV6_2292DSTOPTS:
		case IPV6_RECVTCLASS:
		case IPV6_FLOWINFO:
		case IPV6_RECVPATHMTU:
		case IPV6_RECVORIGDSTADDR:
		case IPV6_RECVFRAGSIZE:

		/* the following ones need some love but are quite common */
		case IPV6_TCLASS:
		case IPV6_TRANSPARENT:
		case IPV6_FREEBIND:
		case IPV6_PKTINFO:
		case IPV6_2292PKTOPTIONS:
		case IPV6_UNICAST_HOPS:
		case IPV6_MTU_DISCOVER:
		case IPV6_MTU:
		case IPV6_RECVERR:
		case IPV6_FLOWINFO_SEND:
		case IPV6_FLOWLABEL_MGR:
		case IPV6_MINHOPCOUNT:
		case IPV6_DONTFRAG:
		case IPV6_AUTOFLOWLABEL:

		/* the following one is a no-op for plain TCP */
		case IPV6_RECVERR_RFC4884:
			return true;
		}

		/* IPV6_HOPOPTS, IPV6_RTHDRDSTOPTS, IPV6_RTHDR, IPV6_DSTOPTS are
		 * not supported
		 */
		/* IPV6_MULTICAST_HOPS, IPV6_MULTICAST_LOOP, IPV6_UNICAST_IF,
		 * IPV6_MULTICAST_IF, IPV6_ADDRFORM,
		 * IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_JOIN_ANYCAST,
		 * IPV6_LEAVE_ANYCAST, IPV6_MULTICAST_ALL, MCAST_JOIN_GROUP, MCAST_LEAVE_GROUP,
		 * MCAST_JOIN_SOURCE_GROUP, MCAST_LEAVE_SOURCE_GROUP,
		 * MCAST_BLOCK_SOURCE, MCAST_UNBLOCK_SOURCE, MCAST_MSFILTER
		 * are not supported, better not deal with mcast
		 */
		/* IPV6_ROUTER_ALERT, IPV6_ROUTER_ALERT_ISOLATE are not supported, since they are evil */

		/* IPV6_IPSEC_POLICY, IPV6_XFRM_POLICY are not supported */
		/* IPV6_ADDR_PREFERENCES is not supported, we must be careful with subflows */
		return false;
	}
	if (level == SOL_TCP) {
		switch (optname) {
		/* the following are no-op or should work just fine */
		case TCP_THIN_DUPACK:
		case TCP_DEFER_ACCEPT:

		/* the following need some love */
		case TCP_MAXSEG:
		case TCP_NODELAY:
		case TCP_THIN_LINEAR_TIMEOUTS:
		case TCP_CONGESTION:
		case TCP_CORK:
		case TCP_KEEPIDLE:
		case TCP_KEEPINTVL:
		case TCP_KEEPCNT:
		case TCP_SYNCNT:
		case TCP_SAVE_SYN:
		case TCP_LINGER2:
		case TCP_WINDOW_CLAMP:
		case TCP_QUICKACK:
		case TCP_USER_TIMEOUT:
		case TCP_TIMESTAMP:
		case TCP_NOTSENT_LOWAT:
		case TCP_TX_DELAY:
			return true;
		}

		/* TCP_MD5SIG, TCP_MD5SIG_EXT are not supported, MD5 is not compatible with MPTCP */

		/* TCP_REPAIR, TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ, TCP_REPAIR_OPTIONS,
		 * TCP_REPAIR_WINDOW are not supported, better avoid this mess
		 */
		/* TCP_FASTOPEN_KEY, TCP_FASTOPEN, TCP_FASTOPEN_CONNECT, TCP_FASTOPEN_NO_COOKIE
		 * are not supported, fastopen is currently unsupported
		 */
		/* TCP_INQ is currently unsupported, needs some recvmsg work */
	}
	return false;
}

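/* TCP_CONGESTION: set the requested congestion control algorithm on every
 * subflow and remember its name in the msk for later subflows.
 */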
static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t optval,
					       unsigned int optlen)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	char name[TCP_CA_NAME_MAX];
	bool cap_net_admin;
	int ret;

	if (optlen < 1)
		return -EINVAL;

	ret = strncpy_from_sockptr(name, optval,
				   min_t(long, TCP_CA_NAME_MAX - 1, optlen));
	if (ret < 0)
		return -EFAULT;

	name[ret] = 0;

	cap_net_admin = ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN);

	ret = 0;
	lock_sock(sk);
	sockopt_seq_inc(msk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err;

		lock_sock(ssk);
		err = tcp_set_congestion_control(ssk, name, true, cap_net_admin);
		if (err < 0 && ret == 0)
			ret = err;
		subflow->setsockopt_seq = msk->setsockopt_seq;
		release_sock(ssk);
	}

	if (ret == 0)
		strcpy(msk->ca_name, name);

	release_sock(sk);
	return ret;
}

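/* SOL_TCP options: only TCP_CONGESTION is handled at the MPTCP level for
 * now; TCP_ULP is explicitly refused.
 */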
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
				    sockptr_t optval, unsigned int optlen)
{
	switch (optname) {
	case TCP_ULP:
		return -EOPNOTSUPP;
	case TCP_CONGESTION:
		return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
	}

	return -EOPNOTSUPP;
}

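/* setsockopt() entry point for MPTCP sockets: SOL_SOCKET is handled locally,
 * unsupported options are rejected early, fallen-back connections pass the
 * option straight to the remaining TCP subflow, and SOL_IPV6/SOL_TCP are
 * dispatched to the helpers above.
 */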
int mptcp_setsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	if (level == SOL_SOCKET)
		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);

	if (!mptcp_supported_sockopt(level, optname))
		return -ENOPROTOOPT;

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when TCP socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_setsockopt(ssk, level, optname, optval, optlen);

	if (level == SOL_IPV6)
		return mptcp_setsockopt_v6(msk, optname, optval, optlen);

	if (level == SOL_TCP)
		return mptcp_setsockopt_sol_tcp(msk, optname, optval, optlen);

	return -EOPNOTSUPP;
}

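/* Forward a getsockopt() request to the first subflow, or to the initial
 * subflow socket if no subflow has been created yet.
 */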
static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
					  char __user *optval, int __user *optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int ret = -EINVAL;
	struct sock *ssk;

	lock_sock(sk);
	ssk = msk->first;
	if (ssk) {
		ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
		goto out;
	}

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto out;

	ret = tcp_getsockopt(ssock->sk, level, optname, optval, optlen);

out:
	release_sock(sk);
	return ret;
}

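/* SOL_TCP getsockopt(): a few read-only/diagnostic options are answered by
 * the first subflow, everything else is unsupported.
 */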
static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
				    char __user *optval, int __user *optlen)
{
	switch (optname) {
	case TCP_ULP:
	case TCP_CONGESTION:
	case TCP_INFO:
	case TCP_CC_INFO:
		return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname,
						      optval, optlen);
	}
	return -EOPNOTSUPP;
}

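/* getsockopt() entry point for MPTCP sockets. */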
int mptcp_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_getsockopt(ssk, level, optname, optval, option);

	if (level == SOL_TCP)
		return mptcp_getsockopt_sol_tcp(msk, optname, optval, option);
	return -EOPNOTSUPP;
}

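/* Copy the msk-level socket options that matter for a subflow onto the given
 * subflow socket: keepalive, priority, bound device, incoming CPU, buffer
 * limits, linger, mark, debug flag and congestion control.
 */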
static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
{
	static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
	struct sock *sk = (struct sock *)msk;

	if (ssk->sk_prot->keepalive) {
		if (sock_flag(sk, SOCK_KEEPOPEN))
			ssk->sk_prot->keepalive(ssk, 1);
		else
			ssk->sk_prot->keepalive(ssk, 0);
	}

	ssk->sk_priority = sk->sk_priority;
	ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
	ssk->sk_incoming_cpu = sk->sk_incoming_cpu;

	if (sk->sk_userlocks & tx_rx_locks) {
		ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
		if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
			WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
	}

	if (sock_flag(sk, SOCK_LINGER)) {
		ssk->sk_lingertime = sk->sk_lingertime;
		sock_set_flag(ssk, SOCK_LINGER);
	} else {
		sock_reset_flag(ssk, SOCK_LINGER);
	}

	if (sk->sk_mark != ssk->sk_mark) {
		ssk->sk_mark = sk->sk_mark;
		sk_dst_reset(ssk);
	}

	sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));

	if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
		tcp_set_congestion_control(ssk, msk->ca_name, false, true);
}

static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
{
	bool slow = lock_sock_fast(ssk);

	sync_socket_options(msk, ssk);

	unlock_sock_fast(ssk, slow);
}

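/* Sync a single subflow with the msk options if its option generation
 * counter is stale.
 */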
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	msk_owned_by_me(msk);

	if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) {
		__mptcp_sockopt_sync(msk, ssk);

		subflow->setsockopt_seq = msk->setsockopt_seq;
	}
}

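/* Walk all subflows, sync the stale ones and reset every generation counter
 * (including the msk's own) to the value matching the current socket state.
 */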
void mptcp_sockopt_sync_all(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 seq;

	seq = sockopt_seq_reset(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		u32 sseq = READ_ONCE(subflow->setsockopt_seq);

		if (sseq != msk->setsockopt_seq) {
			__mptcp_sockopt_sync(msk, ssk);
			WRITE_ONCE(subflow->setsockopt_seq, seq);
		} else if (sseq != seq) {
			WRITE_ONCE(subflow->setsockopt_seq, seq);
		}

		cond_resched();
	}

	msk->setsockopt_seq = seq;
}