1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2011
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  */
8 #include <linux/fs.h>
9 #include <linux/net.h>
10 #include <linux/string.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/signal.h>
13 #include <linux/list.h>
14 #include <linux/wait.h>
15 #include <linux/slab.h>
16 #include <linux/pagemap.h>
17 #include <linux/ctype.h>
18 #include <linux/utsname.h>
19 #include <linux/mempool.h>
20 #include <linux/delay.h>
21 #include <linux/completion.h>
22 #include <linux/kthread.h>
23 #include <linux/pagevec.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/uuid.h>
27 #include <linux/uaccess.h>
28 #include <asm/processor.h>
29 #include <linux/inet.h>
30 #include <linux/module.h>
31 #include <keys/user-type.h>
32 #include <net/ipv6.h>
33 #include <linux/parser.h>
34 #include <linux/bvec.h>
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41 #include "ntlmssp.h"
42 #include "nterr.h"
43 #include "rfc1002pdu.h"
44 #include "fscache.h"
45 #include "smb2proto.h"
46 #include "smbdirect.h"
47 #include "dns_resolve.h"
48 #ifdef CONFIG_CIFS_DFS_UPCALL
49 #include "dfs_cache.h"
50 #endif
51 #include "fs_context.h"
52 #include "cifs_swn.h"
53 
54 extern mempool_t *cifs_req_poolp;
55 extern bool disable_legacy_dialects;
56 
57 /* FIXME: should these be tunable? */
58 #define TLINK_ERROR_EXPIRE	(1 * HZ)
59 #define TLINK_IDLE_EXPIRE	(600 * HZ)
60 
61 /* Drop the connection to not overload the server */
62 #define MAX_STATUS_IO_TIMEOUT   5
63 
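/*
 * Per-mount context: the superblock info and parsed mount options, plus the
 * TCP session, SMB session and tcon established along the way (and, when DFS
 * is enabled, the root session and the origin/leaf referral paths).
 */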
64 struct mount_ctx {
65 	struct cifs_sb_info *cifs_sb;
66 	struct smb3_fs_context *fs_ctx;
67 	unsigned int xid;
68 	struct TCP_Server_Info *server;
69 	struct cifs_ses *ses;
70 	struct cifs_tcon *tcon;
71 #ifdef CONFIG_CIFS_DFS_UPCALL
72 	struct cifs_ses *root_ses;
73 	uuid_t mount_id;
74 	char *origin_fullpath, *leaf_fullpath;
75 #endif
76 };
77 
78 static int ip_connect(struct TCP_Server_Info *server);
79 static int generic_ip_connect(struct TCP_Server_Info *server);
80 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
81 static void cifs_prune_tlinks(struct work_struct *work);
82 
83 /*
84  * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
85  * get their ip addresses changed at some point.
86  *
87  * This should be called with server->srv_mutex held.
88  */
89 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
90 {
91 	int rc;
92 	int len;
93 	char *unc, *ipaddr = NULL;
94 	time64_t expiry, now;
95 	unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
96 
97 	if (!server->hostname)
98 		return -EINVAL;
99 
100 	/* if server hostname isn't populated, there's nothing to do here */
101 	if (server->hostname[0] == '\0')
102 		return 0;
103 
104 	len = strlen(server->hostname) + 3;
105 
106 	unc = kmalloc(len, GFP_KERNEL);
107 	if (!unc) {
108 		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
109 		return -ENOMEM;
110 	}
111 	scnprintf(unc, len, "\\\\%s", server->hostname);
112 
113 	rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
114 	kfree(unc);
115 
116 	if (rc < 0) {
117 		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
118 			 __func__, server->hostname, rc);
119 		goto requeue_resolve;
120 	}
121 
122 	spin_lock(&server->srv_lock);
123 	rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
124 				  strlen(ipaddr));
125 	spin_unlock(&server->srv_lock);
126 	kfree(ipaddr);
127 
128 	/* rc == 1 means success here */
129 	if (rc) {
130 		now = ktime_get_real_seconds();
131 		if (expiry && expiry > now)
132 			/*
133 			 * To make sure we don't use the cached entry, retry 1s
134 			 * after expiry.
135 			 */
136 			ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
137 	}
138 	rc = !rc ? -1 : 0;
139 
140 requeue_resolve:
141 	cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
142 		 __func__, ttl);
143 	mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
144 
145 	return rc;
146 }
147 
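/*
 * Delayed work: periodically re-query the server's network interfaces so the
 * client can notice interface changes. Requeues itself every
 * SMB_INTERFACE_POLL_INTERVAL seconds whether or not the query succeeded.
 */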
148 static void smb2_query_server_interfaces(struct work_struct *work)
149 {
150 	int rc;
151 	struct cifs_tcon *tcon = container_of(work,
152 					struct cifs_tcon,
153 					query_interfaces.work);
154 
155 	/*
156 	 * query server network interfaces, in case they change
157 	 */
158 	rc = SMB3_request_interfaces(0, tcon, false);
159 	if (rc) {
160 		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
161 				__func__, rc);
162 	}
163 
164 	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
165 			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
166 }
167 
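/*
 * Delayed work (server->resolve): re-resolve the server hostname under the
 * server lock so the cached destination address stays current. The next run
 * is scheduled by reconn_set_ipaddr_from_hostname() based on the DNS TTL.
 */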
168 static void cifs_resolve_server(struct work_struct *work)
169 {
170 	int rc;
171 	struct TCP_Server_Info *server = container_of(work,
172 					struct TCP_Server_Info, resolve.work);
173 
174 	cifs_server_lock(server);
175 
176 	/*
177 	 * Resolve the hostname again to make sure that IP address is up-to-date.
178 	 */
179 	rc = reconn_set_ipaddr_from_hostname(server);
180 	if (rc) {
181 		cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
182 				__func__, rc);
183 	}
184 
185 	cifs_server_unlock(server);
186 }
187 
188 /*
189  * Update the tcpStatus for the server.
190  * This is used to signal the cifsd thread to call cifs_reconnect.
191  * ONLY the cifsd thread should call cifs_reconnect; for any other
192  * thread, use this function instead.
193  *
194  * @server: the tcp ses for which reconnect is needed
195  * @all_channels: if this needs to be done for all channels
196  */
197 void
198 cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
199 				bool all_channels)
200 {
201 	struct TCP_Server_Info *pserver;
202 	struct cifs_ses *ses;
203 	int i;
204 
205 	/* If server is a channel, select the primary channel */
206 	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
207 
208 	/* if we need to signal just this channel */
209 	if (!all_channels) {
210 		spin_lock(&server->srv_lock);
211 		if (server->tcpStatus != CifsExiting)
212 			server->tcpStatus = CifsNeedReconnect;
213 		spin_unlock(&server->srv_lock);
214 		return;
215 	}
216 
217 	spin_lock(&cifs_tcp_ses_lock);
218 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
219 		spin_lock(&ses->chan_lock);
220 		for (i = 0; i < ses->chan_count; i++) {
221 			spin_lock(&ses->chans[i].server->srv_lock);
222 			ses->chans[i].server->tcpStatus = CifsNeedReconnect;
223 			spin_unlock(&ses->chans[i].server->srv_lock);
224 		}
225 		spin_unlock(&ses->chan_lock);
226 	}
227 	spin_unlock(&cifs_tcp_ses_lock);
228 }
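/*
 * Illustrative only: a non-cifsd context that notices a dead connection would
 * typically do
 *
 *	cifs_signal_cifsd_for_reconnect(server, false);
 *
 * and let the demultiplex (cifsd) thread observe CifsNeedReconnect and perform
 * the actual cifs_reconnect() itself.
 */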
229 
230 /*
231  * Mark all sessions and tcons for reconnect.
232  * IMPORTANT: make sure that this gets called only from
233  * cifsd thread. For any other thread, use
234  * cifs_signal_cifsd_for_reconnect
235  *
236  * @server: the tcp ses for which reconnect is needed
237  * @server needs to be previously set to CifsNeedReconnect.
238  * @mark_smb_session: whether even sessions need to be marked
239  */
240 void
241 cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
242 				      bool mark_smb_session)
243 {
244 	struct TCP_Server_Info *pserver;
245 	struct cifs_ses *ses, *nses;
246 	struct cifs_tcon *tcon;
247 
248 	/*
249 	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
250 	 * are not used until reconnected.
251 	 */
252 	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
253 
254 	/* If server is a channel, select the primary channel */
255 	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
256 
257 
258 	spin_lock(&cifs_tcp_ses_lock);
259 	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
260 		/* check if iface is still active */
261 		spin_lock(&ses->chan_lock);
262 		if (!cifs_chan_is_iface_active(ses, server)) {
263 			spin_unlock(&ses->chan_lock);
264 			cifs_chan_update_iface(ses, server);
265 			spin_lock(&ses->chan_lock);
266 		}
267 
268 		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
269 			spin_unlock(&ses->chan_lock);
270 			continue;
271 		}
272 
273 		if (mark_smb_session)
274 			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
275 		else
276 			cifs_chan_set_need_reconnect(ses, server);
277 
278 		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
279 			 __func__, ses->chans_need_reconnect);
280 
281 		/* If all channels need reconnect, then tcon needs reconnect */
282 		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
283 			spin_unlock(&ses->chan_lock);
284 			continue;
285 		}
286 		spin_unlock(&ses->chan_lock);
287 
288 		spin_lock(&ses->ses_lock);
289 		ses->ses_status = SES_NEED_RECON;
290 		spin_unlock(&ses->ses_lock);
291 
292 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
293 			tcon->need_reconnect = true;
294 			spin_lock(&tcon->tc_lock);
295 			tcon->status = TID_NEED_RECON;
296 			spin_unlock(&tcon->tc_lock);
297 		}
298 		if (ses->tcon_ipc) {
299 			ses->tcon_ipc->need_reconnect = true;
300 			spin_lock(&ses->tcon_ipc->tc_lock);
301 			ses->tcon_ipc->status = TID_NEED_RECON;
302 			spin_unlock(&ses->tcon_ipc->tc_lock);
303 		}
304 	}
305 	spin_unlock(&cifs_tcp_ses_lock);
306 }
307 
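/*
 * Tear down the existing transport: shut down and release the socket (and the
 * SMBDirect connection, if any), clear the session key and sequence state,
 * then move all pending mids to a private list and complete them with
 * MID_RETRY_NEEDED so waiters can retry once the session is re-established.
 */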
308 static void
309 cifs_abort_connection(struct TCP_Server_Info *server)
310 {
311 	struct mid_q_entry *mid, *nmid;
312 	struct list_head retry_list;
313 
314 	server->maxBuf = 0;
315 	server->max_read = 0;
316 
317 	/* do not want to be sending data on a socket we are freeing */
318 	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
319 	cifs_server_lock(server);
320 	if (server->ssocket) {
321 		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
322 			 server->ssocket->flags);
323 		kernel_sock_shutdown(server->ssocket, SHUT_WR);
324 		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
325 			 server->ssocket->flags);
326 		sock_release(server->ssocket);
327 		server->ssocket = NULL;
328 	}
329 	server->sequence_number = 0;
330 	server->session_estab = false;
331 	kfree_sensitive(server->session_key.response);
332 	server->session_key.response = NULL;
333 	server->session_key.len = 0;
334 	server->lstrp = jiffies;
335 
336 	/* mark submitted MIDs for retry and issue callback */
337 	INIT_LIST_HEAD(&retry_list);
338 	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
339 	spin_lock(&server->mid_lock);
340 	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
341 		kref_get(&mid->refcount);
342 		if (mid->mid_state == MID_REQUEST_SUBMITTED)
343 			mid->mid_state = MID_RETRY_NEEDED;
344 		list_move(&mid->qhead, &retry_list);
345 		mid->mid_flags |= MID_DELETED;
346 	}
347 	spin_unlock(&server->mid_lock);
348 	cifs_server_unlock(server);
349 
350 	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
351 	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
352 		list_del_init(&mid->qhead);
353 		mid->callback(mid);
354 		release_mid(mid);
355 	}
356 
357 	if (cifs_rdma_enabled(server)) {
358 		cifs_server_lock(server);
359 		smbd_destroy(server);
360 		cifs_server_unlock(server);
361 	}
362 }
363 
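/*
 * Record the number of reconnect targets and flip the tcp session to
 * CifsNeedReconnect. Returns false if the session is already exiting, in
 * which case no reconnect should be attempted.
 */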
364 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
365 {
366 	spin_lock(&server->srv_lock);
367 	server->nr_targets = num_targets;
368 	if (server->tcpStatus == CifsExiting) {
369 		/* the demux thread will exit normally next time through the loop */
370 		spin_unlock(&server->srv_lock);
371 		wake_up(&server->response_q);
372 		return false;
373 	}
374 
375 	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
376 	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
377 			     server->hostname);
378 	server->tcpStatus = CifsNeedReconnect;
379 
380 	spin_unlock(&server->srv_lock);
381 	return true;
382 }
383 
384 /*
385  * cifs tcp session reconnection
386  *
387  * mark tcp session as reconnecting so temporarily locked
388  * mark all smb sessions as reconnecting for tcp session
389  * reconnect tcp session
390  * wake up waiters on reconnection? - (not needed currently)
391  *
392  * if mark_smb_session is passed as true, unconditionally mark
393  * the smb session (and tcon) for reconnect as well. This value
394  * doesn't really matter for non-multichannel scenario.
395  *
396  */
397 static int __cifs_reconnect(struct TCP_Server_Info *server,
398 			    bool mark_smb_session)
399 {
400 	int rc = 0;
401 
402 	if (!cifs_tcp_ses_needs_reconnect(server, 1))
403 		return 0;
404 
405 	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
406 
407 	cifs_abort_connection(server);
408 
409 	do {
410 		try_to_freeze();
411 		cifs_server_lock(server);
412 
413 		if (!cifs_swn_set_server_dstaddr(server)) {
414 			/* resolve the hostname again to make sure that IP address is up-to-date */
415 			rc = reconn_set_ipaddr_from_hostname(server);
416 			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
417 		}
418 
419 		if (cifs_rdma_enabled(server))
420 			rc = smbd_reconnect(server);
421 		else
422 			rc = generic_ip_connect(server);
423 		if (rc) {
424 			cifs_server_unlock(server);
425 			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
426 			msleep(3000);
427 		} else {
428 			atomic_inc(&tcpSesReconnectCount);
429 			set_credits(server, 1);
430 			spin_lock(&server->srv_lock);
431 			if (server->tcpStatus != CifsExiting)
432 				server->tcpStatus = CifsNeedNegotiate;
433 			spin_unlock(&server->srv_lock);
434 			cifs_swn_reset_server_dstaddr(server);
435 			cifs_server_unlock(server);
436 			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
437 		}
438 	} while (server->tcpStatus == CifsNeedReconnect);
439 
440 	spin_lock(&server->srv_lock);
441 	if (server->tcpStatus == CifsNeedNegotiate)
442 		mod_delayed_work(cifsiod_wq, &server->echo, 0);
443 	spin_unlock(&server->srv_lock);
444 
445 	wake_up(&server->response_q);
446 	return rc;
447 }
448 
449 #ifdef CONFIG_CIFS_DFS_UPCALL
450 static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
451 {
452 	int rc;
453 	char *hostname;
454 
455 	if (!cifs_swn_set_server_dstaddr(server)) {
456 		if (server->hostname != target) {
457 			hostname = extract_hostname(target);
458 			if (!IS_ERR(hostname)) {
459 				spin_lock(&server->srv_lock);
460 				kfree(server->hostname);
461 				server->hostname = hostname;
462 				spin_unlock(&server->srv_lock);
463 			} else {
464 				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
465 					 __func__, PTR_ERR(hostname));
466 				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
467 					 server->hostname);
468 			}
469 		}
470 		/* resolve the hostname again to make sure that IP address is up-to-date. */
471 		rc = reconn_set_ipaddr_from_hostname(server);
472 		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
473 	}
474 	/* Reconnect the socket */
475 	if (cifs_rdma_enabled(server))
476 		rc = smbd_reconnect(server);
477 	else
478 		rc = generic_ip_connect(server);
479 
480 	return rc;
481 }
482 
483 static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
484 				     struct dfs_cache_tgt_iterator **target_hint)
485 {
486 	int rc;
487 	struct dfs_cache_tgt_iterator *tit;
488 
489 	*target_hint = NULL;
490 
491 	/* If dfs target list is empty, then reconnect to last server */
492 	tit = dfs_cache_get_tgt_iterator(tl);
493 	if (!tit)
494 		return __reconnect_target_unlocked(server, server->hostname);
495 
496 	/* Otherwise, try every dfs target in @tl */
497 	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
498 		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
499 		if (!rc) {
500 			*target_hint = tit;
501 			break;
502 		}
503 	}
504 	return rc;
505 }
506 
507 static int reconnect_dfs_server(struct TCP_Server_Info *server)
508 {
509 	int rc = 0;
510 	const char *refpath = server->current_fullpath + 1;
511 	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
512 	struct dfs_cache_tgt_iterator *target_hint = NULL;
513 	int num_targets = 0;
514 
515 	/*
516 	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
517 	 *
518 	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
519 	 * targets (server->nr_targets).  It's also possible that the cached referral was cleared
520 	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
521 	 * refreshing the referral, so, in this case, default it to 1.
522 	 */
523 	if (!dfs_cache_noreq_find(refpath, NULL, &tl))
524 		num_targets = dfs_cache_get_nr_tgts(&tl);
525 	if (!num_targets)
526 		num_targets = 1;
527 
528 	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
529 		return 0;
530 
531 	/*
532 	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
533 	 * different server or share during failover.  It could be improved by adding some logic to
534 	 * only do that in case it connects to a different server or share, though.
535 	 */
536 	cifs_mark_tcp_ses_conns_for_reconnect(server, true);
537 
538 	cifs_abort_connection(server);
539 
540 	do {
541 		try_to_freeze();
542 		cifs_server_lock(server);
543 
544 		rc = reconnect_target_unlocked(server, &tl, &target_hint);
545 		if (rc) {
546 			/* Failed to reconnect socket */
547 			cifs_server_unlock(server);
548 			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
549 			msleep(3000);
550 			continue;
551 		}
552 		/*
553 		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
554 		 * process waiting for reconnect will know it needs to re-establish session and tcon
555 		 * through the reconnected target server.
556 		 */
557 		atomic_inc(&tcpSesReconnectCount);
558 		set_credits(server, 1);
559 		spin_lock(&server->srv_lock);
560 		if (server->tcpStatus != CifsExiting)
561 			server->tcpStatus = CifsNeedNegotiate;
562 		spin_unlock(&server->srv_lock);
563 		cifs_swn_reset_server_dstaddr(server);
564 		cifs_server_unlock(server);
565 		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
566 	} while (server->tcpStatus == CifsNeedReconnect);
567 
568 	if (target_hint)
569 		dfs_cache_noreq_update_tgthint(refpath, target_hint);
570 
571 	dfs_cache_free_tgts(&tl);
572 
573 	/* Need to set up echo worker again once connection has been established */
574 	spin_lock(&server->srv_lock);
575 	if (server->tcpStatus == CifsNeedNegotiate)
576 		mod_delayed_work(cifsiod_wq, &server->echo, 0);
577 	spin_unlock(&server->srv_lock);
578 
579 	wake_up(&server->response_q);
580 	return rc;
581 }
582 
583 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
584 {
585 	/* If tcp session is not a dfs connection, then reconnect to last target server */
586 	spin_lock(&server->srv_lock);
587 	if (!server->is_dfs_conn) {
588 		spin_unlock(&server->srv_lock);
589 		return __cifs_reconnect(server, mark_smb_session);
590 	}
591 	spin_unlock(&server->srv_lock);
592 
593 	mutex_lock(&server->refpath_lock);
594 	if (!server->origin_fullpath || !server->leaf_fullpath) {
595 		mutex_unlock(&server->refpath_lock);
596 		return __cifs_reconnect(server, mark_smb_session);
597 	}
598 	mutex_unlock(&server->refpath_lock);
599 
600 	return reconnect_dfs_server(server);
601 }
602 #else
603 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
604 {
605 	return __cifs_reconnect(server, mark_smb_session);
606 }
607 #endif
608 
609 static void
610 cifs_echo_request(struct work_struct *work)
611 {
612 	int rc;
613 	struct TCP_Server_Info *server = container_of(work,
614 					struct TCP_Server_Info, echo.work);
615 
616 	/*
617 	 * We cannot send an echo if it is disabled.
618 	 * Also, no need to ping if we got a response recently.
619 	 */
620 
621 	if (server->tcpStatus == CifsNeedReconnect ||
622 	    server->tcpStatus == CifsExiting ||
623 	    server->tcpStatus == CifsNew ||
624 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
625 	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
626 		goto requeue_echo;
627 
628 	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
629 	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
630 
631 	/* Check witness registrations */
632 	cifs_swn_check();
633 
634 requeue_echo:
635 	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
636 }
637 
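/*
 * Make sure the demultiplex thread has both a large and a small receive
 * buffer available, reusing (and re-zeroing the header of) any buffer kept
 * from a previous response. Returns false after a short sleep if allocation
 * fails, so the caller can retry.
 */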
638 static bool
639 allocate_buffers(struct TCP_Server_Info *server)
640 {
641 	if (!server->bigbuf) {
642 		server->bigbuf = (char *)cifs_buf_get();
643 		if (!server->bigbuf) {
644 			cifs_server_dbg(VFS, "No memory for large SMB response\n");
645 			msleep(3000);
646 			/* retry will check if exiting */
647 			return false;
648 		}
649 	} else if (server->large_buf) {
650 		/* we are reusing a dirty large buf, clear its start */
651 		memset(server->bigbuf, 0, HEADER_SIZE(server));
652 	}
653 
654 	if (!server->smallbuf) {
655 		server->smallbuf = (char *)cifs_small_buf_get();
656 		if (!server->smallbuf) {
657 			cifs_server_dbg(VFS, "No memory for SMB response\n");
658 			msleep(1000);
659 			/* retry will check if exiting */
660 			return false;
661 		}
662 		/* beginning of smb buffer is cleared in our buf_get */
663 	} else {
664 		/* if existing small buf clear beginning */
665 		memset(server->smallbuf, 0, HEADER_SIZE(server));
666 	}
667 
668 	return true;
669 }
670 
671 static bool
672 server_unresponsive(struct TCP_Server_Info *server)
673 {
674 	/*
675 	 * We need to wait 3 echo intervals to make sure we handle such
676 	 * situations right:
677 	 * 1s  client sends a normal SMB request
678 	 * 2s  client gets a response
679 	 * 30s echo workqueue job pops, and decides we got a response recently
680 	 *     and don't need to send another
681 	 * ...
682 	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
683 	 *     a response in >60s.
684 	 */
685 	spin_lock(&server->srv_lock);
686 	if ((server->tcpStatus == CifsGood ||
687 	    server->tcpStatus == CifsNeedNegotiate) &&
688 	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
689 	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
690 		spin_unlock(&server->srv_lock);
691 		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
692 			 (3 * server->echo_interval) / HZ);
693 		cifs_reconnect(server, false);
694 		return true;
695 	}
696 	spin_unlock(&server->srv_lock);
697 
698 	return false;
699 }
700 
701 static inline bool
702 zero_credits(struct TCP_Server_Info *server)
703 {
704 	int val;
705 
706 	spin_lock(&server->req_lock);
707 	val = server->credits + server->echo_credits + server->oplock_credits;
708 	if (server->in_flight == 0 && val == 0) {
709 		spin_unlock(&server->req_lock);
710 		return true;
711 	}
712 	spin_unlock(&server->req_lock);
713 	return false;
714 }
715 
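/*
 * Core receive loop: pull data from the socket (or SMBDirect connection) into
 * the supplied msghdr until it is full, returning the number of bytes read.
 * Transient errors (-ERESTARTSYS, -EAGAIN, -EINTR) are retried after a short
 * sleep; hard errors, zero-length reads and a CifsNeedReconnect state trigger
 * a reconnect and return -ECONNABORTED, while CifsExiting returns -ESHUTDOWN.
 */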
716 static int
717 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
718 {
719 	int length = 0;
720 	int total_read;
721 
722 	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
723 		try_to_freeze();
724 
725 		/* reconnect if no credits and no requests in flight */
726 		if (zero_credits(server)) {
727 			cifs_reconnect(server, false);
728 			return -ECONNABORTED;
729 		}
730 
731 		if (server_unresponsive(server))
732 			return -ECONNABORTED;
733 		if (cifs_rdma_enabled(server) && server->smbd_conn)
734 			length = smbd_recv(server->smbd_conn, smb_msg);
735 		else
736 			length = sock_recvmsg(server->ssocket, smb_msg, 0);
737 
738 		spin_lock(&server->srv_lock);
739 		if (server->tcpStatus == CifsExiting) {
740 			spin_unlock(&server->srv_lock);
741 			return -ESHUTDOWN;
742 		}
743 
744 		if (server->tcpStatus == CifsNeedReconnect) {
745 			spin_unlock(&server->srv_lock);
746 			cifs_reconnect(server, false);
747 			return -ECONNABORTED;
748 		}
749 		spin_unlock(&server->srv_lock);
750 
751 		if (length == -ERESTARTSYS ||
752 		    length == -EAGAIN ||
753 		    length == -EINTR) {
754 			/*
755 			 * Minimum sleep to prevent looping, allowing socket
756 			 * to clear and app threads to set tcpStatus
757 			 * CifsNeedReconnect if server hung.
758 			 */
759 			usleep_range(1000, 2000);
760 			length = 0;
761 			continue;
762 		}
763 
764 		if (length <= 0) {
765 			cifs_dbg(FYI, "Received no data or error: %d\n", length);
766 			cifs_reconnect(server, false);
767 			return -ECONNABORTED;
768 		}
769 	}
770 	return total_read;
771 }
772 
773 int
774 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
775 		      unsigned int to_read)
776 {
777 	struct msghdr smb_msg = {};
778 	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
779 	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
780 
781 	return cifs_readv_from_socket(server, &smb_msg);
782 }
783 
784 ssize_t
785 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
786 {
787 	struct msghdr smb_msg = {};
788 
789 	/*
790 	 *  iov_iter_discard already sets smb_msg.type, count and iov_offset,
791 	 *  and cifs_readv_from_socket sets msg_control and msg_controllen,
792 	 *  so there is little to initialize in struct msghdr.
793 	 */
794 	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
795 
796 	return cifs_readv_from_socket(server, &smb_msg);
797 }
798 
799 int
800 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
801 	unsigned int page_offset, unsigned int to_read)
802 {
803 	struct msghdr smb_msg = {};
804 	struct bio_vec bv = {
805 		.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
806 	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
807 	return cifs_readv_from_socket(server, &smb_msg);
808 }
809 
810 static bool
811 is_smb_response(struct TCP_Server_Info *server, unsigned char type)
812 {
813 	/*
814 	 * The first byte of the big endian length field is actually not
815 	 * part of the length but the RFC 1002 frame type, with the most
816 	 * common value, zero, indicating a regular session message.
817 	 */
818 	switch (type) {
819 	case RFC1002_SESSION_MESSAGE:
820 		/* Regular SMB response */
821 		return true;
822 	case RFC1002_SESSION_KEEP_ALIVE:
823 		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
824 		break;
825 	case RFC1002_POSITIVE_SESSION_RESPONSE:
826 		cifs_dbg(FYI, "RFC 1002 positive session response\n");
827 		break;
828 	case RFC1002_NEGATIVE_SESSION_RESPONSE:
829 		/*
830 		 * We get this from Windows 98 instead of an error on
831 		 * SMB negprot response.
832 		 */
833 		cifs_dbg(FYI, "RFC 1002 negative session response\n");
834 		/* give server a second to clean up */
835 		msleep(1000);
836 		/*
837 		 * Always try 445 first on reconnect since we get NACK
838 		 * on some if we ever connected to port 139 (the NACK
839 		 * is since we do not begin with RFC1001 session
840 		 * initialize frame).
841 		 */
842 		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
843 		cifs_reconnect(server, true);
844 		break;
845 	default:
846 		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
847 		cifs_reconnect(server, true);
848 	}
849 
850 	return false;
851 }
852 
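/*
 * Mark a mid as received (or malformed) and remove it from the pending queue
 * under mid_lock. A mid that was already deleted is left alone and a one-time
 * warning is printed, since that indicates a caller bug.
 */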
853 void
854 dequeue_mid(struct mid_q_entry *mid, bool malformed)
855 {
856 #ifdef CONFIG_CIFS_STATS2
857 	mid->when_received = jiffies;
858 #endif
859 	spin_lock(&mid->server->mid_lock);
860 	if (!malformed)
861 		mid->mid_state = MID_RESPONSE_RECEIVED;
862 	else
863 		mid->mid_state = MID_RESPONSE_MALFORMED;
864 	/*
865 	 * Trying to handle/dequeue a mid after the send_recv()
866 	 * function has finished processing it is a bug.
867 	 */
868 	if (mid->mid_flags & MID_DELETED) {
869 		spin_unlock(&mid->server->mid_lock);
870 		pr_warn_once("trying to dequeue a deleted mid\n");
871 	} else {
872 		list_del_init(&mid->qhead);
873 		mid->mid_flags |= MID_DELETED;
874 		spin_unlock(&mid->server->mid_lock);
875 	}
876 }
877 
878 static unsigned int
879 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
880 {
881 	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
882 
883 	/*
884 	 * SMB1 does not use credits.
885 	 */
886 	if (is_smb1(server))
887 		return 0;
888 
889 	return le16_to_cpu(shdr->CreditRequest);
890 }
891 
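/*
 * Attach the response buffer to the mid, record the credits granted in the
 * header, and hand buffer ownership over to the thread that issued the
 * request (unless this is part of a multi-response exchange), then dequeue
 * the mid.
 */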
892 static void
893 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
894 	   char *buf, int malformed)
895 {
896 	if (server->ops->check_trans2 &&
897 	    server->ops->check_trans2(mid, server, buf, malformed))
898 		return;
899 	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
900 	mid->resp_buf = buf;
901 	mid->large_buf = server->large_buf;
902 	/* Was previous buf put in mpx struct for multi-rsp? */
903 	if (!mid->multiRsp) {
904 		/* smb buffer will be freed by user thread */
905 		if (server->large_buf)
906 			server->bigbuf = NULL;
907 		else
908 			server->smallbuf = NULL;
909 	}
910 	dequeue_mid(mid, malformed);
911 }
912 
913 int
914 cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
915 {
916 	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
917 	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
918 	bool mnt_sign_enabled;
919 
920 	/*
921 	 * Is signing required by mnt options? If not then check
922 	 * global_secflags to see if it is there.
923 	 */
924 	if (!mnt_sign_required)
925 		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
926 						CIFSSEC_MUST_SIGN);
927 
928 	/*
929 	 * If signing is required then it's automatically enabled too,
930 	 * otherwise, check to see if the secflags allow it.
931 	 */
932 	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
933 				(global_secflags & CIFSSEC_MAY_SIGN);
934 
935 	/* If server requires signing, does client allow it? */
936 	if (srv_sign_required) {
937 		if (!mnt_sign_enabled) {
938 			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
939 			return -EOPNOTSUPP;
940 		}
941 		server->sign = true;
942 	}
943 
944 	/* If client requires signing, does server allow it? */
945 	if (mnt_sign_required) {
946 		if (!srv_sign_enabled) {
947 			cifs_dbg(VFS, "Server does not support signing!\n");
948 			return -EOPNOTSUPP;
949 		}
950 		server->sign = true;
951 	}
952 
953 	if (cifs_rdma_enabled(server) && server->sign)
954 		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
955 
956 	return 0;
957 }
958 
959 
960 static void clean_demultiplex_info(struct TCP_Server_Info *server)
961 {
962 	int length;
963 
964 	/* take it off the list, if it's not already */
965 	spin_lock(&server->srv_lock);
966 	list_del_init(&server->tcp_ses_list);
967 	spin_unlock(&server->srv_lock);
968 
969 	cancel_delayed_work_sync(&server->echo);
970 	cancel_delayed_work_sync(&server->resolve);
971 
972 	spin_lock(&server->srv_lock);
973 	server->tcpStatus = CifsExiting;
974 	spin_unlock(&server->srv_lock);
975 	wake_up_all(&server->response_q);
976 
977 	/* check if we have blocked requests that need to free */
978 	spin_lock(&server->req_lock);
979 	if (server->credits <= 0)
980 		server->credits = 1;
981 	spin_unlock(&server->req_lock);
982 	/*
983 	 * Although there should not be any requests blocked on this queue it
984 	 * can not hurt to be paranoid and try to wake up requests that may
985 	 * have been blocked when more than 50 at a time were on the wire to the
986 	 * same server - they now will see the session is in exit state and get
987 	 * out of SendReceive.
988 	 */
989 	wake_up_all(&server->request_q);
990 	/* give those requests time to exit */
991 	msleep(125);
992 	if (cifs_rdma_enabled(server))
993 		smbd_destroy(server);
994 	if (server->ssocket) {
995 		sock_release(server->ssocket);
996 		server->ssocket = NULL;
997 	}
998 
999 	if (!list_empty(&server->pending_mid_q)) {
1000 		struct list_head dispose_list;
1001 		struct mid_q_entry *mid_entry;
1002 		struct list_head *tmp, *tmp2;
1003 
1004 		INIT_LIST_HEAD(&dispose_list);
1005 		spin_lock(&server->mid_lock);
1006 		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
1007 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
1008 			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
1009 			kref_get(&mid_entry->refcount);
1010 			mid_entry->mid_state = MID_SHUTDOWN;
1011 			list_move(&mid_entry->qhead, &dispose_list);
1012 			mid_entry->mid_flags |= MID_DELETED;
1013 		}
1014 		spin_unlock(&server->mid_lock);
1015 
1016 		/* now walk dispose list and issue callbacks */
1017 		list_for_each_safe(tmp, tmp2, &dispose_list) {
1018 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
1019 			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
1020 			list_del_init(&mid_entry->qhead);
1021 			mid_entry->callback(mid_entry);
1022 			release_mid(mid_entry);
1023 		}
1024 		/* 1/8th of sec is more than enough time for them to exit */
1025 		msleep(125);
1026 	}
1027 
1028 	if (!list_empty(&server->pending_mid_q)) {
1029 		/*
1030 		 * mpx threads have not exited yet; give them at least the smb
1031 		 * send timeout time for long ops.
1032 		 *
1033 		 * Due to delays on oplock break requests, we need to wait at
1034 		 * least 45 seconds before giving up on a request getting a
1035 		 * response and going ahead and killing cifsd.
1036 		 */
1037 		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
1038 		msleep(46000);
1039 		/*
1040 		 * If threads still have not exited, they are probably never
1041 		 * coming home; not much else we can do but free the memory.
1042 		 */
1043 	}
1044 
1045 #ifdef CONFIG_CIFS_DFS_UPCALL
1046 	kfree(server->origin_fullpath);
1047 	kfree(server->leaf_fullpath);
1048 #endif
1049 	kfree(server);
1050 
1051 	length = atomic_dec_return(&tcpSesAllocCount);
1052 	if (length > 0)
1053 		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1054 }
1055 
1056 static int
1057 static int standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1058 {
1059 	int length;
1060 	char *buf = server->smallbuf;
1061 	unsigned int pdu_length = server->pdu_size;
1062 
1063 	/* make sure this will fit in a large buffer */
1064 	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
1065 	    HEADER_PREAMBLE_SIZE(server)) {
1066 		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
1067 		cifs_reconnect(server, true);
1068 		return -ECONNABORTED;
1069 	}
1070 
1071 	/* switch to large buffer if too big for a small one */
1072 	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
1073 		server->large_buf = true;
1074 		memcpy(server->bigbuf, buf, server->total_read);
1075 		buf = server->bigbuf;
1076 	}
1077 
1078 	/* now read the rest */
1079 	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
1080 				       pdu_length - MID_HEADER_SIZE(server));
1081 
1082 	if (length < 0)
1083 		return length;
1084 	server->total_read += length;
1085 
1086 	dump_smb(buf, server->total_read);
1087 
1088 	return cifs_handle_standard(server, mid);
1089 }
1090 
1091 int
1092 cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1093 {
1094 	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
1095 	int rc;
1096 
1097 	/*
1098 	 * We know that we received enough to get to the MID as we
1099 	 * checked the pdu_length earlier. Now check to see
1100 	 * if the rest of the header is OK.
1101 	 *
1102 	 * 48 bytes is enough to display the header and a little bit
1103 	 * into the payload for debugging purposes.
1104 	 */
1105 	rc = server->ops->check_message(buf, server->total_read, server);
1106 	if (rc)
1107 		cifs_dump_mem("Bad SMB: ", buf,
1108 			min_t(unsigned int, server->total_read, 48));
1109 
1110 	if (server->ops->is_session_expired &&
1111 	    server->ops->is_session_expired(buf)) {
1112 		cifs_reconnect(server, true);
1113 		return -1;
1114 	}
1115 
1116 	if (server->ops->is_status_pending &&
1117 	    server->ops->is_status_pending(buf, server))
1118 		return -1;
1119 
1120 	if (!mid)
1121 		return rc;
1122 
1123 	handle_mid(mid, server, buf, rc);
1124 	return 0;
1125 }
1126 
1127 static void
1128 smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
1129 {
1130 	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
1131 	int scredits, in_flight;
1132 
1133 	/*
1134 	 * SMB1 does not use credits.
1135 	 */
1136 	if (is_smb1(server))
1137 		return;
1138 
1139 	if (shdr->CreditRequest) {
1140 		spin_lock(&server->req_lock);
1141 		server->credits += le16_to_cpu(shdr->CreditRequest);
1142 		scredits = server->credits;
1143 		in_flight = server->in_flight;
1144 		spin_unlock(&server->req_lock);
1145 		wake_up(&server->request_q);
1146 
1147 		trace_smb3_hdr_credits(server->CurrentMid,
1148 				server->conn_id, server->hostname, scredits,
1149 				le16_to_cpu(shdr->CreditRequest), in_flight);
1150 		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
1151 				__func__, le16_to_cpu(shdr->CreditRequest),
1152 				scredits);
1153 	}
1154 }
1155 
1156 
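/*
 * Main receive thread (cifsd) for a TCP session. Repeatedly reads the RFC1002
 * length/type header, reads up to the MID, lets the protocol ops decrypt or
 * split compounded responses into individual mids, and invokes each mid's
 * callback. Socket errors, malformed frames and repeated STATUS_IO_TIMEOUT
 * responses trigger a reconnect; once the session reaches CifsExiting the
 * thread frees its buffers and runs clean_demultiplex_info().
 */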
1157 static int
1158 cifs_demultiplex_thread(void *p)
1159 {
1160 	int i, num_mids, length;
1161 	struct TCP_Server_Info *server = p;
1162 	unsigned int pdu_length;
1163 	unsigned int next_offset;
1164 	char *buf = NULL;
1165 	struct task_struct *task_to_wake = NULL;
1166 	struct mid_q_entry *mids[MAX_COMPOUND];
1167 	char *bufs[MAX_COMPOUND];
1168 	unsigned int noreclaim_flag, num_io_timeout = 0;
1169 	bool pending_reconnect = false;
1170 
1171 	noreclaim_flag = memalloc_noreclaim_save();
1172 	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
1173 
1174 	length = atomic_inc_return(&tcpSesAllocCount);
1175 	if (length > 1)
1176 		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1177 
1178 	set_freezable();
1179 	allow_kernel_signal(SIGKILL);
1180 	while (server->tcpStatus != CifsExiting) {
1181 		if (try_to_freeze())
1182 			continue;
1183 
1184 		if (!allocate_buffers(server))
1185 			continue;
1186 
1187 		server->large_buf = false;
1188 		buf = server->smallbuf;
1189 		pdu_length = 4; /* enough to get RFC1001 header */
1190 
1191 		length = cifs_read_from_socket(server, buf, pdu_length);
1192 		if (length < 0)
1193 			continue;
1194 
1195 		if (is_smb1(server))
1196 			server->total_read = length;
1197 		else
1198 			server->total_read = 0;
1199 
1200 		/*
1201 		 * The right amount was read from socket - 4 bytes,
1202 		 * so we can now interpret the length field.
1203 		 */
1204 		pdu_length = get_rfc1002_length(buf);
1205 
1206 		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
1207 		if (!is_smb_response(server, buf[0]))
1208 			continue;
1209 
1210 		pending_reconnect = false;
1211 next_pdu:
1212 		server->pdu_size = pdu_length;
1213 
1214 		/* make sure we have enough to get to the MID */
1215 		if (server->pdu_size < MID_HEADER_SIZE(server)) {
1216 			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
1217 				 server->pdu_size);
1218 			cifs_reconnect(server, true);
1219 			continue;
1220 		}
1221 
1222 		/* read down to the MID */
1223 		length = cifs_read_from_socket(server,
1224 			     buf + HEADER_PREAMBLE_SIZE(server),
1225 			     MID_HEADER_SIZE(server));
1226 		if (length < 0)
1227 			continue;
1228 		server->total_read += length;
1229 
1230 		if (server->ops->next_header) {
1231 			if (server->ops->next_header(server, buf, &next_offset)) {
1232 				cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
1233 					 __func__, next_offset);
1234 				cifs_reconnect(server, true);
1235 				continue;
1236 			}
1237 			if (next_offset)
1238 				server->pdu_size = next_offset;
1239 		}
1240 
1241 		memset(mids, 0, sizeof(mids));
1242 		memset(bufs, 0, sizeof(bufs));
1243 		num_mids = 0;
1244 
1245 		if (server->ops->is_transform_hdr &&
1246 		    server->ops->receive_transform &&
1247 		    server->ops->is_transform_hdr(buf)) {
1248 			length = server->ops->receive_transform(server,
1249 								mids,
1250 								bufs,
1251 								&num_mids);
1252 		} else {
1253 			mids[0] = server->ops->find_mid(server, buf);
1254 			bufs[0] = buf;
1255 			num_mids = 1;
1256 
1257 			if (!mids[0] || !mids[0]->receive)
1258 				length = standard_receive3(server, mids[0]);
1259 			else
1260 				length = mids[0]->receive(server, mids[0]);
1261 		}
1262 
1263 		if (length < 0) {
1264 			for (i = 0; i < num_mids; i++)
1265 				if (mids[i])
1266 					release_mid(mids[i]);
1267 			continue;
1268 		}
1269 
1270 		if (server->ops->is_status_io_timeout &&
1271 		    server->ops->is_status_io_timeout(buf)) {
1272 			num_io_timeout++;
1273 			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
1274 				cifs_server_dbg(VFS,
1275 						"Number of request timeouts exceeded %d. Reconnecting",
1276 						MAX_STATUS_IO_TIMEOUT);
1277 
1278 				pending_reconnect = true;
1279 				num_io_timeout = 0;
1280 			}
1281 		}
1282 
1283 		server->lstrp = jiffies;
1284 
1285 		for (i = 0; i < num_mids; i++) {
1286 			if (mids[i] != NULL) {
1287 				mids[i]->resp_buf_size = server->pdu_size;
1288 
1289 				if (bufs[i] && server->ops->is_network_name_deleted)
1290 					server->ops->is_network_name_deleted(bufs[i],
1291 									server);
1292 
1293 				if (!mids[i]->multiRsp || mids[i]->multiEnd)
1294 					mids[i]->callback(mids[i]);
1295 
1296 				release_mid(mids[i]);
1297 			} else if (server->ops->is_oplock_break &&
1298 				   server->ops->is_oplock_break(bufs[i],
1299 								server)) {
1300 				smb2_add_credits_from_hdr(bufs[i], server);
1301 				cifs_dbg(FYI, "Received oplock break\n");
1302 			} else {
1303 				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
1304 						atomic_read(&mid_count));
1305 				cifs_dump_mem("Received Data is: ", bufs[i],
1306 					      HEADER_SIZE(server));
1307 				smb2_add_credits_from_hdr(bufs[i], server);
1308 #ifdef CONFIG_CIFS_DEBUG2
1309 				if (server->ops->dump_detail)
1310 					server->ops->dump_detail(bufs[i],
1311 								 server);
1312 				cifs_dump_mids(server);
1313 #endif /* CIFS_DEBUG2 */
1314 			}
1315 		}
1316 
1317 		if (pdu_length > server->pdu_size) {
1318 			if (!allocate_buffers(server))
1319 				continue;
1320 			pdu_length -= server->pdu_size;
1321 			server->total_read = 0;
1322 			server->large_buf = false;
1323 			buf = server->smallbuf;
1324 			goto next_pdu;
1325 		}
1326 
1327 		/* do this reconnect at the very end after processing all MIDs */
1328 		if (pending_reconnect)
1329 			cifs_reconnect(server, true);
1330 
1331 	} /* end while !EXITING */
1332 
1333 	/* buffer usually freed in free_mid - need to free it here on exit */
1334 	cifs_buf_release(server->bigbuf);
1335 	if (server->smallbuf) /* no sense logging a debug message if NULL */
1336 		cifs_small_buf_release(server->smallbuf);
1337 
1338 	task_to_wake = xchg(&server->tsk, NULL);
1339 	clean_demultiplex_info(server);
1340 
1341 	/* if server->tsk was NULL then wait for a signal before exiting */
1342 	if (!task_to_wake) {
1343 		set_current_state(TASK_INTERRUPTIBLE);
1344 		while (!signal_pending(current)) {
1345 			schedule();
1346 			set_current_state(TASK_INTERRUPTIBLE);
1347 		}
1348 		set_current_state(TASK_RUNNING);
1349 	}
1350 
1351 	memalloc_noreclaim_restore(noreclaim_flag);
1352 	module_put_and_kthread_exit(0);
1353 }
1354 
1355 int
1356 cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
1357 {
1358 	struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1359 	struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1360 	struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1361 	struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1362 
1363 	switch (srcaddr->sa_family) {
1364 	case AF_UNSPEC:
1365 		switch (rhs->sa_family) {
1366 		case AF_UNSPEC:
1367 			return 0;
1368 		case AF_INET:
1369 		case AF_INET6:
1370 			return 1;
1371 		default:
1372 			return -1;
1373 		}
1374 	case AF_INET: {
1375 		switch (rhs->sa_family) {
1376 		case AF_UNSPEC:
1377 			return -1;
1378 		case AF_INET:
1379 			return memcmp(saddr4, vaddr4,
1380 				      sizeof(struct sockaddr_in));
1381 		case AF_INET6:
1382 			return 1;
1383 		default:
1384 			return -1;
1385 		}
1386 	}
1387 	case AF_INET6: {
1388 		switch (rhs->sa_family) {
1389 		case AF_UNSPEC:
1390 		case AF_INET:
1391 			return -1;
1392 		case AF_INET6:
1393 			return memcmp(saddr6,
1394 				      vaddr6,
1395 				      sizeof(struct sockaddr_in6));
1396 		default:
1397 			return -1;
1398 		}
1399 	}
1400 	default:
1401 		return -1; /* don't expect to be here */
1402 	}
1403 }
1404 
1405 /*
1406  * Returns true if srcaddr isn't specified and rhs isn't specified, or
1407  * if srcaddr is specified and matches the IP address of the rhs argument
1408  */
1409 bool
1410 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
1411 {
1412 	switch (srcaddr->sa_family) {
1413 	case AF_UNSPEC:
1414 		return (rhs->sa_family == AF_UNSPEC);
1415 	case AF_INET: {
1416 		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1417 		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1418 		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1419 	}
1420 	case AF_INET6: {
1421 		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1422 		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1423 		return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
1424 	}
1425 	default:
1426 		WARN_ON(1);
1427 		return false; /* don't expect to be here */
1428 	}
1429 }
1430 
1431 /*
1432  * If no port is specified in addr structure, we try to match with 445 port
1433  * and if it fails - with 139 ports. It should be called only if address
1434  * families of server and addr are equal.
1435  */
1436 static bool
1437 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
1438 {
1439 	__be16 port, *sport;
1440 
1441 	/* SMBDirect manages its own ports, don't match it here */
1442 	if (server->rdma)
1443 		return true;
1444 
1445 	switch (addr->sa_family) {
1446 	case AF_INET:
1447 		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
1448 		port = ((struct sockaddr_in *) addr)->sin_port;
1449 		break;
1450 	case AF_INET6:
1451 		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
1452 		port = ((struct sockaddr_in6 *) addr)->sin6_port;
1453 		break;
1454 	default:
1455 		WARN_ON(1);
1456 		return false;
1457 	}
1458 
1459 	if (!port) {
1460 		port = htons(CIFS_PORT);
1461 		if (port == *sport)
1462 			return true;
1463 
1464 		port = htons(RFC1001_PORT);
1465 	}
1466 
1467 	return port == *sport;
1468 }
1469 
1470 static bool
1471 match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
1472 	      struct sockaddr *srcaddr)
1473 {
1474 	switch (addr->sa_family) {
1475 	case AF_INET: {
1476 		struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
1477 		struct sockaddr_in *srv_addr4 =
1478 					(struct sockaddr_in *)&server->dstaddr;
1479 
1480 		if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
1481 			return false;
1482 		break;
1483 	}
1484 	case AF_INET6: {
1485 		struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
1486 		struct sockaddr_in6 *srv_addr6 =
1487 					(struct sockaddr_in6 *)&server->dstaddr;
1488 
1489 		if (!ipv6_addr_equal(&addr6->sin6_addr,
1490 				     &srv_addr6->sin6_addr))
1491 			return false;
1492 		if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
1493 			return false;
1494 		break;
1495 	}
1496 	default:
1497 		WARN_ON(1);
1498 		return false; /* don't expect to be here */
1499 	}
1500 
1501 	if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
1502 		return false;
1503 
1504 	return true;
1505 }
1506 
1507 static bool
1508 match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1509 {
1510 	/*
1511 	 * The select_sectype function should either return the ctx->sectype
1512 	 * that was specified, or "Unspecified" if that sectype was not
1513 	 * compatible with the given NEGOTIATE request.
1514 	 */
1515 	if (server->ops->select_sectype(server, ctx->sectype)
1516 	     == Unspecified)
1517 		return false;
1518 
1519 	/*
1520 	 * Now check if signing mode is acceptable. No need to check
1521 	 * global_secflags at this point since if MUST_SIGN is set then
1522 	 * the server->sign had better be too.
1523 	 */
1524 	if (ctx->sign && !server->sign)
1525 		return false;
1526 
1527 	return true;
1528 }
1529 
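/*
 * Decide whether an existing tcp session can be shared with a new mount: the
 * dialect, network namespace, hostname, destination/source address, port,
 * security settings, echo interval, RDMA, ignore_signature and min_offload
 * settings must all match, and neither side may have nosharesock set.
 */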
1530 /* this function must be called with srv_lock held */
1531 static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1532 {
1533 	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
1534 
1535 	lockdep_assert_held(&server->srv_lock);
1536 
1537 	if (ctx->nosharesock)
1538 		return 0;
1539 
1540 	/* this server does not share socket */
1541 	if (server->nosharesock)
1542 		return 0;
1543 
1544 	/* If multidialect negotiation see if existing sessions match one */
1545 	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
1546 		if (server->vals->protocol_id < SMB30_PROT_ID)
1547 			return 0;
1548 	} else if (strcmp(ctx->vals->version_string,
1549 		   SMBDEFAULT_VERSION_STRING) == 0) {
1550 		if (server->vals->protocol_id < SMB21_PROT_ID)
1551 			return 0;
1552 	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
1553 		return 0;
1554 
1555 	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
1556 		return 0;
1557 
1558 	if (strcasecmp(server->hostname, ctx->server_hostname))
1559 		return 0;
1560 
1561 	if (!match_address(server, addr,
1562 			   (struct sockaddr *)&ctx->srcaddr))
1563 		return 0;
1564 
1565 	if (!match_port(server, addr))
1566 		return 0;
1567 
1568 	if (!match_security(server, ctx))
1569 		return 0;
1570 
1571 	if (server->echo_interval != ctx->echo_interval * HZ)
1572 		return 0;
1573 
1574 	if (server->rdma != ctx->rdma)
1575 		return 0;
1576 
1577 	if (server->ignore_signature != ctx->ignore_signature)
1578 		return 0;
1579 
1580 	if (server->min_offload != ctx->min_offload)
1581 		return 0;
1582 
1583 	return 1;
1584 }
1585 
1586 struct TCP_Server_Info *
1587 cifs_find_tcp_session(struct smb3_fs_context *ctx)
1588 {
1589 	struct TCP_Server_Info *server;
1590 
1591 	spin_lock(&cifs_tcp_ses_lock);
1592 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1593 		spin_lock(&server->srv_lock);
1594 #ifdef CONFIG_CIFS_DFS_UPCALL
1595 		/*
1596 		 * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
1597 		 * DFS connections to do failover properly, so avoid sharing them with regular
1598 		 * shares or even links that may connect to same server but having completely
1599 		 * different failover targets.
1600 		 */
1601 		if (server->is_dfs_conn) {
1602 			spin_unlock(&server->srv_lock);
1603 			continue;
1604 		}
1605 #endif
1606 		/*
1607 		 * Skip ses channels since they're only handled in lower layers
1608 		 * (e.g. cifs_send_recv).
1609 		 */
1610 		if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
1611 			spin_unlock(&server->srv_lock);
1612 			continue;
1613 		}
1614 		spin_unlock(&server->srv_lock);
1615 
1616 		++server->srv_count;
1617 		spin_unlock(&cifs_tcp_ses_lock);
1618 		cifs_dbg(FYI, "Existing tcp session with server found\n");
1619 		return server;
1620 	}
1621 	spin_unlock(&cifs_tcp_ses_lock);
1622 	return NULL;
1623 }
1624 
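/*
 * Drop a reference to the tcp session. On the final put, remove it from the
 * global list, release the primary channel reference (for secondary
 * channels), cancel the echo/resolve/reconnect workers, free the session key
 * and hostname, and send SIGKILL to wake the demultiplex thread so it exits
 * and runs clean_demultiplex_info(), which frees the server.
 */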
1625 void
1626 cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
1627 {
1628 	struct task_struct *task;
1629 
1630 	spin_lock(&cifs_tcp_ses_lock);
1631 	if (--server->srv_count > 0) {
1632 		spin_unlock(&cifs_tcp_ses_lock);
1633 		return;
1634 	}
1635 
1636 	/* srv_count can never go negative */
1637 	WARN_ON(server->srv_count < 0);
1638 
1639 	put_net(cifs_net_ns(server));
1640 
1641 	list_del_init(&server->tcp_ses_list);
1642 	spin_unlock(&cifs_tcp_ses_lock);
1643 
1644 	/* For secondary channels, we pick up ref-count on the primary server */
1645 	if (CIFS_SERVER_IS_CHAN(server))
1646 		cifs_put_tcp_session(server->primary_server, from_reconnect);
1647 
1648 	cancel_delayed_work_sync(&server->echo);
1649 	cancel_delayed_work_sync(&server->resolve);
1650 
1651 	if (from_reconnect)
1652 		/*
1653 		 * Avoid deadlock here: reconnect work calls
1654 		 * cifs_put_tcp_session() at its end. Need to be sure
1655 		 * that reconnect work does nothing with server pointer after
1656 		 * that step.
1657 		 */
1658 		cancel_delayed_work(&server->reconnect);
1659 	else
1660 		cancel_delayed_work_sync(&server->reconnect);
1661 
1662 	spin_lock(&server->srv_lock);
1663 	server->tcpStatus = CifsExiting;
1664 	spin_unlock(&server->srv_lock);
1665 
1666 	cifs_crypto_secmech_release(server);
1667 
1668 	kfree_sensitive(server->session_key.response);
1669 	server->session_key.response = NULL;
1670 	server->session_key.len = 0;
1671 	kfree(server->hostname);
1672 	server->hostname = NULL;
1673 
1674 	task = xchg(&server->tsk, NULL);
1675 	if (task)
1676 		send_sig(SIGKILL, task, 1);
1677 }
1678 
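/*
 * Find an existing tcp session that matches @ctx (see match_server()) or
 * create a new one: allocate and initialize the TCP_Server_Info, establish
 * the SMBDirect or TCP connection, and start the demultiplex thread. For
 * secondary channels a reference on @primary_server is taken.
 */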
1679 struct TCP_Server_Info *
1680 cifs_get_tcp_session(struct smb3_fs_context *ctx,
1681 		     struct TCP_Server_Info *primary_server)
1682 {
1683 	struct TCP_Server_Info *tcp_ses = NULL;
1684 	int rc;
1685 
1686 	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
1687 
1688 	/* see if we already have a matching tcp_ses */
1689 	tcp_ses = cifs_find_tcp_session(ctx);
1690 	if (tcp_ses)
1691 		return tcp_ses;
1692 
1693 	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
1694 	if (!tcp_ses) {
1695 		rc = -ENOMEM;
1696 		goto out_err;
1697 	}
1698 
1699 	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
1700 	if (!tcp_ses->hostname) {
1701 		rc = -ENOMEM;
1702 		goto out_err;
1703 	}
1704 
1705 	if (ctx->nosharesock)
1706 		tcp_ses->nosharesock = true;
1707 
1708 	tcp_ses->ops = ctx->ops;
1709 	tcp_ses->vals = ctx->vals;
1710 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
1711 
1712 	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
1713 	tcp_ses->noblockcnt = ctx->rootfs;
1714 	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
1715 	tcp_ses->noautotune = ctx->noautotune;
1716 	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
1717 	tcp_ses->rdma = ctx->rdma;
1718 	tcp_ses->in_flight = 0;
1719 	tcp_ses->max_in_flight = 0;
1720 	tcp_ses->credits = 1;
1721 	if (primary_server) {
1722 		spin_lock(&cifs_tcp_ses_lock);
1723 		++primary_server->srv_count;
1724 		spin_unlock(&cifs_tcp_ses_lock);
1725 		tcp_ses->primary_server = primary_server;
1726 	}
1727 	init_waitqueue_head(&tcp_ses->response_q);
1728 	init_waitqueue_head(&tcp_ses->request_q);
1729 	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
1730 	mutex_init(&tcp_ses->_srv_mutex);
1731 	memcpy(tcp_ses->workstation_RFC1001_name,
1732 		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1733 	memcpy(tcp_ses->server_RFC1001_name,
1734 		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1735 	tcp_ses->session_estab = false;
1736 	tcp_ses->sequence_number = 0;
1737 	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
1738 	tcp_ses->reconnect_instance = 1;
1739 	tcp_ses->lstrp = jiffies;
1740 	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
1741 	spin_lock_init(&tcp_ses->req_lock);
1742 	spin_lock_init(&tcp_ses->srv_lock);
1743 	spin_lock_init(&tcp_ses->mid_lock);
1744 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1745 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
1746 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
1747 	INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
1748 	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
1749 	mutex_init(&tcp_ses->reconnect_mutex);
1750 #ifdef CONFIG_CIFS_DFS_UPCALL
1751 	mutex_init(&tcp_ses->refpath_lock);
1752 #endif
1753 	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
1754 	       sizeof(tcp_ses->srcaddr));
1755 	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
1756 		sizeof(tcp_ses->dstaddr));
1757 	if (ctx->use_client_guid)
1758 		memcpy(tcp_ses->client_guid, ctx->client_guid,
1759 		       SMB2_CLIENT_GUID_SIZE);
1760 	else
1761 		generate_random_uuid(tcp_ses->client_guid);
1762 	/*
1763 	 * At this point we are the only ones with a pointer to the struct,
1764 	 * since the kernel thread has not been created yet; no need to
1765 	 * spinlock this init of tcpStatus or srv_count.
1766 	 */
1767 	tcp_ses->tcpStatus = CifsNew;
1768 	++tcp_ses->srv_count;
1769 
1770 	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
1771 		ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
1772 		tcp_ses->echo_interval = ctx->echo_interval * HZ;
1773 	else
1774 		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
1775 	if (tcp_ses->rdma) {
1776 #ifndef CONFIG_CIFS_SMB_DIRECT
1777 		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
1778 		rc = -ENOENT;
1779 		goto out_err_crypto_release;
1780 #endif
1781 		tcp_ses->smbd_conn = smbd_get_connection(
1782 			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
1783 		if (tcp_ses->smbd_conn) {
1784 			cifs_dbg(VFS, "RDMA transport established\n");
1785 			rc = 0;
1786 			goto smbd_connected;
1787 		} else {
1788 			rc = -ENOENT;
1789 			goto out_err_crypto_release;
1790 		}
1791 	}
1792 	rc = ip_connect(tcp_ses);
1793 	if (rc < 0) {
1794 		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
1795 		goto out_err_crypto_release;
1796 	}
1797 smbd_connected:
1798 	/*
1799 	 * since we're in a cifs function already, we know that
1800 	 * this will succeed. No need for try_module_get().
1801 	 */
1802 	__module_get(THIS_MODULE);
1803 	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
1804 				  tcp_ses, "cifsd");
1805 	if (IS_ERR(tcp_ses->tsk)) {
1806 		rc = PTR_ERR(tcp_ses->tsk);
1807 		cifs_dbg(VFS, "error %d creating cifsd thread\n", rc);
1808 		module_put(THIS_MODULE);
1809 		goto out_err_crypto_release;
1810 	}
1811 	tcp_ses->min_offload = ctx->min_offload;
1812 	/*
1813 	 * The demultiplex thread was started above, so other contexts can
1814 	 * now reach this server; take srv_lock before updating tcpStatus
1815 	 * below.
1816 	 */
1817 	spin_lock(&tcp_ses->srv_lock);
1818 	tcp_ses->tcpStatus = CifsNeedNegotiate;
1819 	spin_unlock(&tcp_ses->srv_lock);
1820 
1821 	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
1822 		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
1823 	else
1824 		tcp_ses->max_credits = ctx->max_credits;
1825 
1826 	tcp_ses->nr_targets = 1;
1827 	tcp_ses->ignore_signature = ctx->ignore_signature;
1828 	/* thread spawned, put it on the list */
1829 	spin_lock(&cifs_tcp_ses_lock);
1830 	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
1831 	spin_unlock(&cifs_tcp_ses_lock);
1832 
1833 	/* queue echo request delayed work */
1834 	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
1835 
1836 	/* queue dns resolution delayed work */
1837 	cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
1838 		 __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
1839 
1840 	queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
1841 
1842 	return tcp_ses;
1843 
1844 out_err_crypto_release:
1845 	cifs_crypto_secmech_release(tcp_ses);
1846 
1847 	put_net(cifs_net_ns(tcp_ses));
1848 
1849 out_err:
1850 	if (tcp_ses) {
1851 		if (CIFS_SERVER_IS_CHAN(tcp_ses))
1852 			cifs_put_tcp_session(tcp_ses->primary_server, false);
1853 		kfree(tcp_ses->hostname);
1854 		if (tcp_ses->ssocket)
1855 			sock_release(tcp_ses->ssocket);
1856 		kfree(tcp_ses);
1857 	}
1858 	return ERR_PTR(rc);
1859 }
1860 
1861 /* this function must be called with ses_lock and chan_lock held */
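/*
 * Matching criteria, in order: security type, the requested channel count
 * (an existing session capped below ctx->max_channels is not reused), then
 * the credentials - cred_uid for Kerberos, otherwise username/password,
 * where a NULL username only matches a nullauth request.
 */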
1862 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1863 {
1864 	if (ctx->sectype != Unspecified &&
1865 	    ctx->sectype != ses->sectype)
1866 		return 0;
1867 
1868 	/*
1869 	 * If an existing session is limited to fewer channels than
1870 	 * requested, it should not be reused.
1871 	 */
1872 	if (ses->chan_max < ctx->max_channels)
1873 		return 0;
1874 
1875 	switch (ses->sectype) {
1876 	case Kerberos:
1877 		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
1878 			return 0;
1879 		break;
1880 	default:
1881 		/* NULL username means anonymous session */
1882 		if (ses->user_name == NULL) {
1883 			if (!ctx->nullauth)
1884 				return 0;
1885 			break;
1886 		}
1887 
1888 		/* anything else takes username/password */
1889 		if (strncmp(ses->user_name,
1890 			    ctx->username ? ctx->username : "",
1891 			    CIFS_MAX_USERNAME_LEN))
1892 			return 0;
1893 		if ((ctx->username && strlen(ctx->username) != 0) &&
1894 		    ses->password != NULL &&
1895 		    strncmp(ses->password,
1896 			    ctx->password ? ctx->password : "",
1897 			    CIFS_MAX_PASSWORD_LEN))
1898 			return 0;
1899 	}
1900 	return 1;
1901 }
1902 
1903 /**
1904  * cifs_setup_ipc - helper to setup the IPC tcon for the session
1905  * @ses: smb session to issue the request on
1906  * @ctx: the superblock configuration context to use for building the
1907  *       new tree connection for the IPC (interprocess communication RPC)
1908  *
1909  * A new IPC connection is made and stored in the session
1910  * tcon_ipc. The IPC tcon has the same lifetime as the session.
1911  */
1912 static int
1913 cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1914 {
1915 	int rc = 0, xid;
1916 	struct cifs_tcon *tcon;
1917 	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
1918 	bool seal = false;
1919 	struct TCP_Server_Info *server = ses->server;
1920 
1921 	/*
1922 	 * If the mount request that resulted in the creation of the
1923 	 * session requires encryption, force IPC to be encrypted too.
1924 	 */
1925 	if (ctx->seal) {
1926 		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
1927 			seal = true;
1928 		else {
1929 			cifs_server_dbg(VFS,
1930 				 "IPC: server doesn't support encryption\n");
1931 			return -EOPNOTSUPP;
1932 		}
1933 	}
1934 
1935 	tcon = tconInfoAlloc();
1936 	if (tcon == NULL)
1937 		return -ENOMEM;
1938 
1939 	spin_lock(&server->srv_lock);
1940 	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
1941 	spin_unlock(&server->srv_lock);
1942 
1943 	xid = get_xid();
1944 	tcon->ses = ses;
1945 	tcon->ipc = true;
1946 	tcon->seal = seal;
1947 	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
1948 	free_xid(xid);
1949 
1950 	if (rc) {
1951 		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
1952 		tconInfoFree(tcon);
1953 		goto out;
1954 	}
1955 
1956 	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
1957 
1958 	spin_lock(&tcon->tc_lock);
1959 	tcon->status = TID_GOOD;
1960 	spin_unlock(&tcon->tc_lock);
1961 	ses->tcon_ipc = tcon;
1962 out:
1963 	return rc;
1964 }
1965 
1966 /**
1967  * cifs_free_ipc - helper to release the session IPC tcon
1968  * @ses: smb session to unmount the IPC from
1969  *
1970  * Needs to be called every time a session is destroyed.
1971  *
1972  * On session close, the IPC is closed and the server must release all tcons of the session.
1973  * No need to send a tree disconnect here.
1974  *
1975  * Not sending one also keeps the server from closing durable and resilient files on session
1976  * close, as specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
1977  */
1978 static int
1979 cifs_free_ipc(struct cifs_ses *ses)
1980 {
1981 	struct cifs_tcon *tcon = ses->tcon_ipc;
1982 
1983 	if (tcon == NULL)
1984 		return 0;
1985 
1986 	tconInfoFree(tcon);
1987 	ses->tcon_ipc = NULL;
1988 	return 0;
1989 }
1990 
1991 static struct cifs_ses *
1992 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1993 {
1994 	struct cifs_ses *ses;
1995 
1996 	spin_lock(&cifs_tcp_ses_lock);
1997 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1998 		spin_lock(&ses->ses_lock);
1999 		if (ses->ses_status == SES_EXITING) {
2000 			spin_unlock(&ses->ses_lock);
2001 			continue;
2002 		}
2003 		spin_lock(&ses->chan_lock);
2004 		if (!match_session(ses, ctx)) {
2005 			spin_unlock(&ses->chan_lock);
2006 			spin_unlock(&ses->ses_lock);
2007 			continue;
2008 		}
2009 		spin_unlock(&ses->chan_lock);
2010 		spin_unlock(&ses->ses_lock);
2011 
2012 		++ses->ses_count;
2013 		spin_unlock(&cifs_tcp_ses_lock);
2014 		return ses;
2015 	}
2016 	spin_unlock(&cifs_tcp_ses_lock);
2017 	return NULL;
2018 }
2019 
2020 void cifs_put_smb_ses(struct cifs_ses *ses)
2021 {
2022 	unsigned int rc, xid;
2023 	unsigned int chan_count;
2024 	struct TCP_Server_Info *server = ses->server;
2025 
2026 	spin_lock(&ses->ses_lock);
2027 	if (ses->ses_status == SES_EXITING) {
2028 		spin_unlock(&ses->ses_lock);
2029 		return;
2030 	}
2031 	spin_unlock(&ses->ses_lock);
2032 
2033 	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
2034 	cifs_dbg(FYI,
2035 		 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
2036 
2037 	spin_lock(&cifs_tcp_ses_lock);
2038 	if (--ses->ses_count > 0) {
2039 		spin_unlock(&cifs_tcp_ses_lock);
2040 		return;
2041 	}
2042 	spin_unlock(&cifs_tcp_ses_lock);
2043 
2044 	/* ses_count can never go negative */
2045 	WARN_ON(ses->ses_count < 0);
2046 
2047 	if (ses->ses_status == SES_GOOD)
2048 		ses->ses_status = SES_EXITING;
2049 
2050 	cifs_free_ipc(ses);
2051 
2052 	if (ses->ses_status == SES_EXITING && server->ops->logoff) {
2053 		xid = get_xid();
2054 		rc = server->ops->logoff(xid, ses);
2055 		if (rc)
2056 			cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
2057 				__func__, rc);
2058 		_free_xid(xid);
2059 	}
2060 
2061 	spin_lock(&cifs_tcp_ses_lock);
2062 	list_del_init(&ses->smb_ses_list);
2063 	spin_unlock(&cifs_tcp_ses_lock);
2064 
2065 	chan_count = ses->chan_count;
2066 
2067 	/* close any extra channels */
2068 	if (chan_count > 1) {
2069 		int i;
2070 
2071 		for (i = 1; i < chan_count; i++) {
2072 			if (ses->chans[i].iface) {
2073 				kref_put(&ses->chans[i].iface->refcount, release_iface);
2074 				ses->chans[i].iface = NULL;
2075 			}
2076 			cifs_put_tcp_session(ses->chans[i].server, 0);
2077 			ses->chans[i].server = NULL;
2078 		}
2079 	}
2080 
2081 	/* we now account for primary channel in iface->refcount */
2082 	if (ses->chans[0].iface) {
2083 		kref_put(&ses->chans[0].iface->refcount, release_iface);
2084 		ses->chans[0].server = NULL;
2085 	}
2086 
2087 	sesInfoFree(ses);
2088 	cifs_put_tcp_session(server, 0);
2089 }
2090 
2091 #ifdef CONFIG_KEYS
2092 
2093 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
2094 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
2095 
2096 /* Populate username and pw fields from keyring if possible */
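/*
 * Illustrative example (not taken from this file): credentials for a given
 * server address or domain are expected as "logon" keys whose payload is
 * "<username>:<password>", e.g. added from userspace with something like:
 *
 *   keyctl add logon cifs:a:192.168.1.50 "user:pass" @u
 *   keyctl add logon cifs:d:EXAMPLE "user:pass" @u
 *
 * The lookup below tries the address key ("cifs:a:") first and falls back
 * to the domain key ("cifs:d:").
 */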
2097 static int
2098 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
2099 {
2100 	int rc = 0;
2101 	int is_domain = 0;
2102 	const char *delim, *payload;
2103 	char *desc;
2104 	ssize_t len;
2105 	struct key *key;
2106 	struct TCP_Server_Info *server = ses->server;
2107 	struct sockaddr_in *sa;
2108 	struct sockaddr_in6 *sa6;
2109 	const struct user_key_payload *upayload;
2110 
2111 	desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
2112 	if (!desc)
2113 		return -ENOMEM;
2114 
2115 	/* try to find an address key first */
2116 	switch (server->dstaddr.ss_family) {
2117 	case AF_INET:
2118 		sa = (struct sockaddr_in *)&server->dstaddr;
2119 		sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
2120 		break;
2121 	case AF_INET6:
2122 		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
2123 		sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
2124 		break;
2125 	default:
2126 		cifs_dbg(FYI, "Bad ss_family (%hu)\n",
2127 			 server->dstaddr.ss_family);
2128 		rc = -EINVAL;
2129 		goto out_err;
2130 	}
2131 
2132 	cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2133 	key = request_key(&key_type_logon, desc, "");
2134 	if (IS_ERR(key)) {
2135 		if (!ses->domainName) {
2136 			cifs_dbg(FYI, "domainName is NULL\n");
2137 			rc = PTR_ERR(key);
2138 			goto out_err;
2139 		}
2140 
2141 		/* didn't work, try to find a domain key */
2142 		sprintf(desc, "cifs:d:%s", ses->domainName);
2143 		cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2144 		key = request_key(&key_type_logon, desc, "");
2145 		if (IS_ERR(key)) {
2146 			rc = PTR_ERR(key);
2147 			goto out_err;
2148 		}
2149 		is_domain = 1;
2150 	}
2151 
2152 	down_read(&key->sem);
2153 	upayload = user_key_payload_locked(key);
2154 	if (IS_ERR_OR_NULL(upayload)) {
2155 		rc = upayload ? PTR_ERR(upayload) : -EINVAL;
2156 		goto out_key_put;
2157 	}
2158 
2159 	/* find first : in payload */
2160 	payload = upayload->data;
2161 	delim = strnchr(payload, upayload->datalen, ':');
2162 	cifs_dbg(FYI, "payload=%s\n", payload);
2163 	if (!delim) {
2164 		cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
2165 			 upayload->datalen);
2166 		rc = -EINVAL;
2167 		goto out_key_put;
2168 	}
2169 
2170 	len = delim - payload;
2171 	if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
2172 		cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
2173 			 len);
2174 		rc = -EINVAL;
2175 		goto out_key_put;
2176 	}
2177 
2178 	ctx->username = kstrndup(payload, len, GFP_KERNEL);
2179 	if (!ctx->username) {
2180 		cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
2181 			 len);
2182 		rc = -ENOMEM;
2183 		goto out_key_put;
2184 	}
2185 	cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
2186 
2187 	len = key->datalen - (len + 1);
2188 	if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
2189 		cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
2190 		rc = -EINVAL;
2191 		kfree(ctx->username);
2192 		ctx->username = NULL;
2193 		goto out_key_put;
2194 	}
2195 
2196 	++delim;
2197 	ctx->password = kstrndup(delim, len, GFP_KERNEL);
2198 	if (!ctx->password) {
2199 		cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
2200 			 len);
2201 		rc = -ENOMEM;
2202 		kfree(ctx->username);
2203 		ctx->username = NULL;
2204 		goto out_key_put;
2205 	}
2206 
2207 	/*
2208 	 * If we have a domain key then we must set the domainName for
2209 	 * the request.
2210 	 */
2211 	if (is_domain && ses->domainName) {
2212 		ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
2213 		if (!ctx->domainname) {
2214 			cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
2215 				 len);
2216 			rc = -ENOMEM;
2217 			kfree(ctx->username);
2218 			ctx->username = NULL;
2219 			kfree_sensitive(ctx->password);
2220 			ctx->password = NULL;
2221 			goto out_key_put;
2222 		}
2223 	}
2224 
2225 	strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
2226 
2227 out_key_put:
2228 	up_read(&key->sem);
2229 	key_put(key);
2230 out_err:
2231 	kfree(desc);
2232 	cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
2233 	return rc;
2234 }
2235 #else /* ! CONFIG_KEYS */
2236 static inline int
2237 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
2238 		   struct cifs_ses *ses __attribute__((unused)))
2239 {
2240 	return -ENOSYS;
2241 }
2242 #endif /* CONFIG_KEYS */
2243 
2244 /**
2245  * cifs_get_smb_ses - get a session matching @ctx data from @server
2246  * @server: server to setup the session to
2247  * @ctx: superblock configuration context to use to setup the session
2248  *
2249  * This function assumes it is being called from cifs_mount() where we
2250  * already got a server reference (server refcount +1). See
2251  * cifs_get_tcon() for refcount explanations.
2252  */
2253 struct cifs_ses *
2254 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
2255 {
2256 	int rc = 0;
2257 	unsigned int xid;
2258 	struct cifs_ses *ses;
2259 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
2260 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
2261 
2262 	xid = get_xid();
2263 
2264 	ses = cifs_find_smb_ses(server, ctx);
2265 	if (ses) {
2266 		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
2267 			 ses->ses_status);
2268 
2269 		spin_lock(&ses->chan_lock);
2270 		if (cifs_chan_needs_reconnect(ses, server)) {
2271 			spin_unlock(&ses->chan_lock);
2272 			cifs_dbg(FYI, "Session needs reconnect\n");
2273 
2274 			mutex_lock(&ses->session_mutex);
2275 			rc = cifs_negotiate_protocol(xid, ses, server);
2276 			if (rc) {
2277 				mutex_unlock(&ses->session_mutex);
2278 				/* problem -- put our ses reference */
2279 				cifs_put_smb_ses(ses);
2280 				free_xid(xid);
2281 				return ERR_PTR(rc);
2282 			}
2283 
2284 			rc = cifs_setup_session(xid, ses, server,
2285 						ctx->local_nls);
2286 			if (rc) {
2287 				mutex_unlock(&ses->session_mutex);
2288 				/* problem -- put our reference */
2289 				cifs_put_smb_ses(ses);
2290 				free_xid(xid);
2291 				return ERR_PTR(rc);
2292 			}
2293 			mutex_unlock(&ses->session_mutex);
2294 
2295 			spin_lock(&ses->chan_lock);
2296 		}
2297 		spin_unlock(&ses->chan_lock);
2298 
2299 		/* existing SMB ses has a server reference already */
2300 		cifs_put_tcp_session(server, 0);
2301 		free_xid(xid);
2302 		return ses;
2303 	}
2304 
2305 	rc = -ENOMEM;
2306 
2307 	cifs_dbg(FYI, "Existing smb sess not found\n");
2308 	ses = sesInfoAlloc();
2309 	if (ses == NULL)
2310 		goto get_ses_fail;
2311 
2312 	/* new SMB session uses our server ref */
2313 	ses->server = server;
2314 	if (server->dstaddr.ss_family == AF_INET6)
2315 		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
2316 	else
2317 		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
2318 
2319 	if (ctx->username) {
2320 		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
2321 		if (!ses->user_name)
2322 			goto get_ses_fail;
2323 	}
2324 
2325 	/* ctx->password freed at unmount */
2326 	if (ctx->password) {
2327 		ses->password = kstrdup(ctx->password, GFP_KERNEL);
2328 		if (!ses->password)
2329 			goto get_ses_fail;
2330 	}
2331 	if (ctx->domainname) {
2332 		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
2333 		if (!ses->domainName)
2334 			goto get_ses_fail;
2335 	}
2336 
2337 	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
2338 
2339 	if (ctx->domainauto)
2340 		ses->domainAuto = ctx->domainauto;
2341 	ses->cred_uid = ctx->cred_uid;
2342 	ses->linux_uid = ctx->linux_uid;
2343 
2344 	ses->sectype = ctx->sectype;
2345 	ses->sign = ctx->sign;
2346 
2347 	/* add server as first channel */
2348 	spin_lock(&ses->chan_lock);
2349 	ses->chans[0].server = server;
2350 	ses->chan_count = 1;
2351 	ses->chan_max = ctx->multichannel ? ctx->max_channels : 1;
2352 	ses->chans_need_reconnect = 1;
2353 	spin_unlock(&ses->chan_lock);
2354 
2355 	mutex_lock(&ses->session_mutex);
2356 	rc = cifs_negotiate_protocol(xid, ses, server);
2357 	if (!rc)
2358 		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
2359 	mutex_unlock(&ses->session_mutex);
2360 
2361 	/* each channel uses a different signing key */
2362 	spin_lock(&ses->chan_lock);
2363 	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
2364 	       sizeof(ses->smb3signingkey));
2365 	spin_unlock(&ses->chan_lock);
2366 
2367 	if (rc)
2368 		goto get_ses_fail;
2369 
2370 	/*
2371 	 * success, put it on the list and add it as first channel
2372 	 * note: the session becomes active soon after this. So you'll
2373 	 * need to lock before changing something in the session.
2374 	 */
2375 	spin_lock(&cifs_tcp_ses_lock);
2376 	list_add(&ses->smb_ses_list, &server->smb_ses_list);
2377 	spin_unlock(&cifs_tcp_ses_lock);
2378 
2379 	cifs_setup_ipc(ses, ctx);
2380 
2381 	free_xid(xid);
2382 
2383 	return ses;
2384 
2385 get_ses_fail:
2386 	sesInfoFree(ses);
2387 	free_xid(xid);
2388 	return ERR_PTR(rc);
2389 }
2390 
2391 /* this function must be called with tc_lock held */
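/*
 * A tcon is only reused when the tree name, seal (encryption), snapshot
 * time, handle timeout, no_lease and nodelete settings all match the new
 * mount request and the tcon is not already exiting.
 */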
2392 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
2393 {
2394 	if (tcon->status == TID_EXITING)
2395 		return 0;
2396 	if (strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
2397 		return 0;
2398 	if (tcon->seal != ctx->seal)
2399 		return 0;
2400 	if (tcon->snapshot_time != ctx->snapshot_time)
2401 		return 0;
2402 	if (tcon->handle_timeout != ctx->handle_timeout)
2403 		return 0;
2404 	if (tcon->no_lease != ctx->no_lease)
2405 		return 0;
2406 	if (tcon->nodelete != ctx->nodelete)
2407 		return 0;
2408 	return 1;
2409 }
2410 
2411 static struct cifs_tcon *
2412 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2413 {
2414 	struct cifs_tcon *tcon;
2415 
2416 	spin_lock(&cifs_tcp_ses_lock);
2417 	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
2418 		spin_lock(&tcon->tc_lock);
2419 		if (!match_tcon(tcon, ctx)) {
2420 			spin_unlock(&tcon->tc_lock);
2421 			continue;
2422 		}
2423 		++tcon->tc_count;
2424 		spin_unlock(&tcon->tc_lock);
2425 		spin_unlock(&cifs_tcp_ses_lock);
2426 		return tcon;
2427 	}
2428 	spin_unlock(&cifs_tcp_ses_lock);
2429 	return NULL;
2430 }
2431 
2432 void
2433 cifs_put_tcon(struct cifs_tcon *tcon)
2434 {
2435 	unsigned int xid;
2436 	struct cifs_ses *ses;
2437 
2438 	/*
2439 	 * IPC tcons share the lifetime of their session and are
2440 	 * destroyed in the session put function.
2441 	 */
2442 	if (tcon == NULL || tcon->ipc)
2443 		return;
2444 
2445 	ses = tcon->ses;
2446 	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
2447 	spin_lock(&cifs_tcp_ses_lock);
2448 	spin_lock(&tcon->tc_lock);
2449 	if (--tcon->tc_count > 0) {
2450 		spin_unlock(&tcon->tc_lock);
2451 		spin_unlock(&cifs_tcp_ses_lock);
2452 		return;
2453 	}
2454 
2455 	/* tc_count can never go negative */
2456 	WARN_ON(tcon->tc_count < 0);
2457 
2458 	list_del_init(&tcon->tcon_list);
2459 	tcon->status = TID_EXITING;
2460 	spin_unlock(&tcon->tc_lock);
2461 	spin_unlock(&cifs_tcp_ses_lock);
2462 
2463 	/* cancel polling of interfaces */
2464 	cancel_delayed_work_sync(&tcon->query_interfaces);
2465 
2466 	if (tcon->use_witness) {
2467 		int rc;
2468 
2469 		rc = cifs_swn_unregister(tcon);
2470 		if (rc < 0) {
2471 			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
2472 					__func__, rc);
2473 		}
2474 	}
2475 
2476 	xid = get_xid();
2477 	if (ses->server->ops->tree_disconnect)
2478 		ses->server->ops->tree_disconnect(xid, tcon);
2479 	_free_xid(xid);
2480 
2481 	cifs_fscache_release_super_cookie(tcon);
2482 	tconInfoFree(tcon);
2483 	cifs_put_smb_ses(ses);
2484 }
2485 
2486 /**
2487  * cifs_get_tcon - get a tcon matching @ctx data from @ses
2488  * @ses: smb session to issue the request on
2489  * @ctx: the superblock configuration context to use for building the new tree connection
2490  *
2491  * - tcon refcount is the number of mount points using the tcon.
2492  * - ses refcount is the number of tcon using the session.
2493  *
2494  * 1. This function assumes it is being called from cifs_mount() where
2495  *    we already got a session reference (ses refcount +1).
2496  *
2497  * 2. Since we're in the context of adding a mount point, the end
2498  *    result should be either:
2499  *
2500  * a) a new tcon already allocated with refcount=1 (1 mount point) and
2501  *    its session refcount incremented (1 new tcon). This +1 was
2502  *    already done in (1).
2503  *
2504  * b) an existing tcon with refcount+1 (add a mount point to it) and
2505  *    identical ses refcount (no new tcon). Because of (1) we need to
2506  *    decrement the ses refcount.
2507  */
2508 static struct cifs_tcon *
2509 cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2510 {
2511 	int rc, xid;
2512 	struct cifs_tcon *tcon;
2513 
2514 	tcon = cifs_find_tcon(ses, ctx);
2515 	if (tcon) {
2516 		/*
2517 		 * tcon has refcount already incremented but we need to
2518 		 * decrement extra ses reference gotten by caller (case b)
2519 		 */
2520 		cifs_dbg(FYI, "Found match on UNC path\n");
2521 		cifs_put_smb_ses(ses);
2522 		return tcon;
2523 	}
2524 
2525 	if (!ses->server->ops->tree_connect) {
2526 		rc = -ENOSYS;
2527 		goto out_fail;
2528 	}
2529 
2530 	tcon = tconInfoAlloc();
2531 	if (tcon == NULL) {
2532 		rc = -ENOMEM;
2533 		goto out_fail;
2534 	}
2535 
2536 	if (ctx->snapshot_time) {
2537 		if (ses->server->vals->protocol_id == 0) {
2538 			cifs_dbg(VFS,
2539 			     "Use SMB2 or later for snapshot mount option\n");
2540 			rc = -EOPNOTSUPP;
2541 			goto out_fail;
2542 		} else
2543 			tcon->snapshot_time = ctx->snapshot_time;
2544 	}
2545 
2546 	if (ctx->handle_timeout) {
2547 		if (ses->server->vals->protocol_id == 0) {
2548 			cifs_dbg(VFS,
2549 			     "Use SMB2.1 or later for handle timeout option\n");
2550 			rc = -EOPNOTSUPP;
2551 			goto out_fail;
2552 		} else
2553 			tcon->handle_timeout = ctx->handle_timeout;
2554 	}
2555 
2556 	tcon->ses = ses;
2557 	if (ctx->password) {
2558 		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
2559 		if (!tcon->password) {
2560 			rc = -ENOMEM;
2561 			goto out_fail;
2562 		}
2563 	}
2564 
2565 	if (ctx->seal) {
2566 		if (ses->server->vals->protocol_id == 0) {
2567 			cifs_dbg(VFS,
2568 				 "SMB3 or later required for encryption\n");
2569 			rc = -EOPNOTSUPP;
2570 			goto out_fail;
2571 		} else if (tcon->ses->server->capabilities &
2572 					SMB2_GLOBAL_CAP_ENCRYPTION)
2573 			tcon->seal = true;
2574 		else {
2575 			cifs_dbg(VFS, "Encryption is not supported on share\n");
2576 			rc = -EOPNOTSUPP;
2577 			goto out_fail;
2578 		}
2579 	}
2580 
2581 	if (ctx->linux_ext) {
2582 		if (ses->server->posix_ext_supported) {
2583 			tcon->posix_extensions = true;
2584 			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
2585 		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
2586 		    (strcmp(ses->server->vals->version_string,
2587 		     SMB3ANY_VERSION_STRING) == 0) ||
2588 		    (strcmp(ses->server->vals->version_string,
2589 		     SMBDEFAULT_VERSION_STRING) == 0)) {
2590 			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
2591 			rc = -EOPNOTSUPP;
2592 			goto out_fail;
2593 		} else {
2594 			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
2595 				"disabled but required for POSIX extensions\n");
2596 			rc = -EOPNOTSUPP;
2597 			goto out_fail;
2598 		}
2599 	}
2600 
2601 	xid = get_xid();
2602 	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
2603 					    ctx->local_nls);
2604 	free_xid(xid);
2605 	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
2606 	if (rc)
2607 		goto out_fail;
2608 
2609 	tcon->use_persistent = false;
2610 	/* check if SMB2 or later, CIFS does not support persistent handles */
2611 	if (ctx->persistent) {
2612 		if (ses->server->vals->protocol_id == 0) {
2613 			cifs_dbg(VFS,
2614 			     "SMB3 or later required for persistent handles\n");
2615 			rc = -EOPNOTSUPP;
2616 			goto out_fail;
2617 		} else if (ses->server->capabilities &
2618 			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2619 			tcon->use_persistent = true;
2620 		else /* persistent handles requested but not supported */ {
2621 			cifs_dbg(VFS,
2622 				"Persistent handles not supported on share\n");
2623 			rc = -EOPNOTSUPP;
2624 			goto out_fail;
2625 		}
2626 	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
2627 	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2628 	     && (ctx->nopersistent == false)) {
2629 		cifs_dbg(FYI, "enabling persistent handles\n");
2630 		tcon->use_persistent = true;
2631 	} else if (ctx->resilient) {
2632 		if (ses->server->vals->protocol_id == 0) {
2633 			cifs_dbg(VFS,
2634 			     "SMB2.1 or later required for resilient handles\n");
2635 			rc = -EOPNOTSUPP;
2636 			goto out_fail;
2637 		}
2638 		tcon->use_resilient = true;
2639 	}
2640 
2641 	tcon->use_witness = false;
2642 	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
2643 		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
2644 			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
2645 				/*
2646 				 * Set witness in use flag in first place
2647 				 * to retry registration in the echo task
2648 				 */
2649 				tcon->use_witness = true;
2650 				/* And try to register immediately */
2651 				rc = cifs_swn_register(tcon);
2652 				if (rc < 0) {
2653 					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
2654 					goto out_fail;
2655 				}
2656 			} else {
2657 				/* TODO: try to extend for non-cluster uses (eg multichannel) */
2658 				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
2659 				rc = -EOPNOTSUPP;
2660 				goto out_fail;
2661 			}
2662 		} else {
2663 			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
2664 			rc = -EOPNOTSUPP;
2665 			goto out_fail;
2666 		}
2667 	}
2668 
2669 	/* If the user really knows what they are doing they can override */
2670 	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
2671 		if (ctx->cache_ro)
2672 			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
2673 		else if (ctx->cache_rw)
2674 			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
2675 	}
2676 
2677 	if (ctx->no_lease) {
2678 		if (ses->server->vals->protocol_id == 0) {
2679 			cifs_dbg(VFS,
2680 				"SMB2 or later required for nolease option\n");
2681 			rc = -EOPNOTSUPP;
2682 			goto out_fail;
2683 		} else
2684 			tcon->no_lease = ctx->no_lease;
2685 	}
2686 
2687 	/*
2688 	 * We can have only one retry value for a connection to a share so for
2689 	 * resources mounted more than once to the same server share the last
2690 	 * value passed in for the retry flag is used.
2691 	 */
2692 	tcon->retry = ctx->retry;
2693 	tcon->nocase = ctx->nocase;
2694 	tcon->broken_sparse_sup = ctx->no_sparse;
2695 	if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
2696 		tcon->nohandlecache = ctx->nohandlecache;
2697 	else
2698 		tcon->nohandlecache = true;
2699 	tcon->nodelete = ctx->nodelete;
2700 	tcon->local_lease = ctx->local_lease;
2701 	INIT_LIST_HEAD(&tcon->pending_opens);
2702 	tcon->status = TID_GOOD;
2703 
2704 	INIT_DELAYED_WORK(&tcon->query_interfaces,
2705 			  smb2_query_server_interfaces);
2706 	if (ses->server->dialect >= SMB30_PROT_ID &&
2707 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
2708 		/* schedule query interfaces poll */
2709 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
2710 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
2711 	}
2712 
2713 	spin_lock(&cifs_tcp_ses_lock);
2714 	list_add(&tcon->tcon_list, &ses->tcon_list);
2715 	spin_unlock(&cifs_tcp_ses_lock);
2716 
2717 	return tcon;
2718 
2719 out_fail:
2720 	tconInfoFree(tcon);
2721 	return ERR_PTR(rc);
2722 }
2723 
2724 void
2725 cifs_put_tlink(struct tcon_link *tlink)
2726 {
2727 	if (!tlink || IS_ERR(tlink))
2728 		return;
2729 
2730 	if (!atomic_dec_and_test(&tlink->tl_count) ||
2731 	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
2732 		tlink->tl_time = jiffies;
2733 		return;
2734 	}
2735 
2736 	if (!IS_ERR(tlink_tcon(tlink)))
2737 		cifs_put_tcon(tlink_tcon(tlink));
2738 	kfree(tlink);
2739 	return;
2740 }
2741 
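/*
 * Decide whether an existing superblock can be shared with a new mount:
 * the MS_* flags, the CIFS mount flags, rsize/wsize (the new values must
 * not be smaller than the existing ones), uid/gid, file/dir modes, charset
 * and the attribute/close cache timeouts all have to be compatible.
 */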
2742 static int
2743 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2744 {
2745 	struct cifs_sb_info *old = CIFS_SB(sb);
2746 	struct cifs_sb_info *new = mnt_data->cifs_sb;
2747 	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
2748 	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
2749 
2750 	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
2751 		return 0;
2752 
2753 	if (old->mnt_cifs_serverino_autodisabled)
2754 		newflags &= ~CIFS_MOUNT_SERVER_INUM;
2755 
2756 	if (oldflags != newflags)
2757 		return 0;
2758 
2759 	/*
2760 	 * We want to share sb only if we don't specify an r/wsize or
2761 	 * specified r/wsize is greater than or equal to existing one.
2762 	 */
2763 	if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
2764 		return 0;
2765 
2766 	if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
2767 		return 0;
2768 
2769 	if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
2770 	    !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
2771 		return 0;
2772 
2773 	if (old->ctx->file_mode != new->ctx->file_mode ||
2774 	    old->ctx->dir_mode != new->ctx->dir_mode)
2775 		return 0;
2776 
2777 	if (strcmp(old->local_nls->charset, new->local_nls->charset))
2778 		return 0;
2779 
2780 	if (old->ctx->acregmax != new->ctx->acregmax)
2781 		return 0;
2782 	if (old->ctx->acdirmax != new->ctx->acdirmax)
2783 		return 0;
2784 	if (old->ctx->closetimeo != new->ctx->closetimeo)
2785 		return 0;
2786 
2787 	return 1;
2788 }
2789 
2790 static int
2791 match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2792 {
2793 	struct cifs_sb_info *old = CIFS_SB(sb);
2794 	struct cifs_sb_info *new = mnt_data->cifs_sb;
2795 	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2796 		old->prepath;
2797 	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2798 		new->prepath;
2799 
2800 	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2801 		return 1;
2802 	else if (!old_set && !new_set)
2803 		return 1;
2804 
2805 	return 0;
2806 }
2807 
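/*
 * Note the lock ordering used when comparing against an existing
 * superblock: cifs_tcp_ses_lock, then srv_lock, ses_lock, chan_lock and
 * finally tc_lock, since match_server/match_session/match_tcon expect
 * their respective locks to be held by the caller.
 */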
2808 int
2809 cifs_match_super(struct super_block *sb, void *data)
2810 {
2811 	struct cifs_mnt_data *mnt_data = data;
2812 	struct smb3_fs_context *ctx;
2813 	struct cifs_sb_info *cifs_sb;
2814 	struct TCP_Server_Info *tcp_srv;
2815 	struct cifs_ses *ses;
2816 	struct cifs_tcon *tcon;
2817 	struct tcon_link *tlink;
2818 	int rc = 0;
2819 
2820 	spin_lock(&cifs_tcp_ses_lock);
2821 	cifs_sb = CIFS_SB(sb);
2822 
2823 	/* We do not want to use a superblock that has been shutdown */
2824 	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
2825 		spin_unlock(&cifs_tcp_ses_lock);
2826 		return 0;
2827 	}
2828 
2829 	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
2830 	if (tlink == NULL) {
2831 		/* cannot match the superblock if the tlink was ever NULL */
2832 		spin_unlock(&cifs_tcp_ses_lock);
2833 		return 0;
2834 	}
2835 	tcon = tlink_tcon(tlink);
2836 	ses = tcon->ses;
2837 	tcp_srv = ses->server;
2838 
2839 	ctx = mnt_data->ctx;
2840 
2841 	spin_lock(&tcp_srv->srv_lock);
2842 	spin_lock(&ses->ses_lock);
2843 	spin_lock(&ses->chan_lock);
2844 	spin_lock(&tcon->tc_lock);
2845 	if (!match_server(tcp_srv, ctx) ||
2846 	    !match_session(ses, ctx) ||
2847 	    !match_tcon(tcon, ctx) ||
2848 	    !match_prepath(sb, mnt_data)) {
2849 		rc = 0;
2850 		goto out;
2851 	}
2852 
2853 	rc = compare_mount_options(sb, mnt_data);
2854 out:
2855 	spin_unlock(&tcon->tc_lock);
2856 	spin_unlock(&ses->chan_lock);
2857 	spin_unlock(&ses->ses_lock);
2858 	spin_unlock(&tcp_srv->srv_lock);
2859 
2860 	spin_unlock(&cifs_tcp_ses_lock);
2861 	cifs_put_tlink(tlink);
2862 	return rc;
2863 }
2864 
2865 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2866 static struct lock_class_key cifs_key[2];
2867 static struct lock_class_key cifs_slock_key[2];
2868 
2869 static inline void
2870 cifs_reclassify_socket4(struct socket *sock)
2871 {
2872 	struct sock *sk = sock->sk;
2873 	BUG_ON(!sock_allow_reclassification(sk));
2874 	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
2875 		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
2876 }
2877 
2878 static inline void
2879 cifs_reclassify_socket6(struct socket *sock)
2880 {
2881 	struct sock *sk = sock->sk;
2882 	BUG_ON(!sock_allow_reclassification(sk));
2883 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
2884 		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
2885 }
2886 #else
2887 static inline void
2888 cifs_reclassify_socket4(struct socket *sock)
2889 {
2890 }
2891 
2892 static inline void
2893 cifs_reclassify_socket6(struct socket *sock)
2894 {
2895 }
2896 #endif
2897 
2898 /* See RFC1001 section 14 on representation of Netbios names */
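/*
 * Each source byte is split into two nibbles and each nibble is encoded as
 * 'A' + nibble, doubling the length.  For example 'A' (0x41) becomes "EB"
 * and a space (0x20) becomes "CA".
 */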
2899 static void rfc1002mangle(char *target, char *source, unsigned int length)
2900 {
2901 	unsigned int i, j;
2902 
2903 	for (i = 0, j = 0; i < (length); i++) {
2904 		/* mask a nibble at a time and encode */
2905 		target[j] = 'A' + (0x0F & (source[i] >> 4));
2906 		target[j+1] = 'A' + (0x0F & source[i]);
2907 		j += 2;
2908 	}
2909 
2910 }
2911 
2912 static int
2913 bind_socket(struct TCP_Server_Info *server)
2914 {
2915 	int rc = 0;
2916 	if (server->srcaddr.ss_family != AF_UNSPEC) {
2917 		/* Bind to the specified local IP address */
2918 		struct socket *socket = server->ssocket;
2919 		rc = kernel_bind(socket,
2920 				 (struct sockaddr *) &server->srcaddr,
2921 				 sizeof(server->srcaddr));
2922 		if (rc < 0) {
2923 			struct sockaddr_in *saddr4;
2924 			struct sockaddr_in6 *saddr6;
2925 			saddr4 = (struct sockaddr_in *)&server->srcaddr;
2926 			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
2927 			if (saddr6->sin6_family == AF_INET6)
2928 				cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
2929 					 &saddr6->sin6_addr, rc);
2930 			else
2931 				cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
2932 					 &saddr4->sin_addr.s_addr, rc);
2933 		}
2934 	}
2935 	return rc;
2936 }
2937 
2938 static int
2939 ip_rfc1001_connect(struct TCP_Server_Info *server)
2940 {
2941 	int rc = 0;
2942 	/*
2943 	 * some servers require RFC1001 sessinit before sending
2944 	 * negprot - BB check reconnection in case where second
2945 	 * sessinit is sent but no second negprot
2946 	 */
2947 	struct rfc1002_session_packet req = {};
2948 	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
2949 	unsigned int len;
2950 
2951 	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
2952 
2953 	if (server->server_RFC1001_name[0] != 0)
2954 		rfc1002mangle(req.trailer.session_req.called_name,
2955 			      server->server_RFC1001_name,
2956 			      RFC1001_NAME_LEN_WITH_NULL);
2957 	else
2958 		rfc1002mangle(req.trailer.session_req.called_name,
2959 			      DEFAULT_CIFS_CALLED_NAME,
2960 			      RFC1001_NAME_LEN_WITH_NULL);
2961 
2962 	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
2963 
2964 	/* calling name ends in null (byte 16) from old smb convention */
2965 	if (server->workstation_RFC1001_name[0] != 0)
2966 		rfc1002mangle(req.trailer.session_req.calling_name,
2967 			      server->workstation_RFC1001_name,
2968 			      RFC1001_NAME_LEN_WITH_NULL);
2969 	else
2970 		rfc1002mangle(req.trailer.session_req.calling_name,
2971 			      "LINUX_CIFS_CLNT",
2972 			      RFC1001_NAME_LEN_WITH_NULL);
2973 
2974 	/*
2975 	 * As per rfc1002, @len must be the number of bytes that follows the
2976 	 * length field of a rfc1002 session request payload.
2977 	 */
2978 	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
2979 
2980 	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
2981 	rc = smb_send(server, smb_buf, len);
2982 	/*
2983 	 * The RFC1001 layer in at least one server requires a very short break
2984 	 * before negprot, presumably because it does not expect negprot to
2985 	 * follow so fast. This is a simple solution that works without
2986 	 * complicating the code and causes no significant mount slowdown for everyone else.
2987 	 */
2988 	usleep_range(1000, 2000);
2989 
2990 	return rc;
2991 }
2992 
2993 static int
2994 generic_ip_connect(struct TCP_Server_Info *server)
2995 {
2996 	int rc = 0;
2997 	__be16 sport;
2998 	int slen, sfamily;
2999 	struct socket *socket = server->ssocket;
3000 	struct sockaddr *saddr;
3001 
3002 	saddr = (struct sockaddr *) &server->dstaddr;
3003 
3004 	if (server->dstaddr.ss_family == AF_INET6) {
3005 		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
3006 
3007 		sport = ipv6->sin6_port;
3008 		slen = sizeof(struct sockaddr_in6);
3009 		sfamily = AF_INET6;
3010 		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
3011 				ntohs(sport));
3012 	} else {
3013 		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
3014 
3015 		sport = ipv4->sin_port;
3016 		slen = sizeof(struct sockaddr_in);
3017 		sfamily = AF_INET;
3018 		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
3019 				ntohs(sport));
3020 	}
3021 
3022 	if (socket == NULL) {
3023 		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
3024 				   IPPROTO_TCP, &socket, 1);
3025 		if (rc < 0) {
3026 			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
3027 			server->ssocket = NULL;
3028 			return rc;
3029 		}
3030 
3031 		/* BB other socket options to set KEEPALIVE, NODELAY? */
3032 		cifs_dbg(FYI, "Socket created\n");
3033 		server->ssocket = socket;
3034 		socket->sk->sk_allocation = GFP_NOFS;
3035 		if (sfamily == AF_INET6)
3036 			cifs_reclassify_socket6(socket);
3037 		else
3038 			cifs_reclassify_socket4(socket);
3039 	}
3040 
3041 	rc = bind_socket(server);
3042 	if (rc < 0)
3043 		return rc;
3044 
3045 	/*
3046 	 * Eventually check for other socket options to change from
3047 	 * the default. sock_setsockopt not used because it expects
3048 	 * user space buffer
3049 	 */
3050 	socket->sk->sk_rcvtimeo = 7 * HZ;
3051 	socket->sk->sk_sndtimeo = 5 * HZ;
3052 
3053 	/* make the bufsizes depend on wsize/rsize and max requests */
3054 	if (server->noautotune) {
3055 		if (socket->sk->sk_sndbuf < (200 * 1024))
3056 			socket->sk->sk_sndbuf = 200 * 1024;
3057 		if (socket->sk->sk_rcvbuf < (140 * 1024))
3058 			socket->sk->sk_rcvbuf = 140 * 1024;
3059 	}
3060 
3061 	if (server->tcp_nodelay)
3062 		tcp_sock_set_nodelay(socket->sk);
3063 
3064 	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
3065 		 socket->sk->sk_sndbuf,
3066 		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
3067 
3068 	rc = kernel_connect(socket, saddr, slen,
3069 			    server->noblockcnt ? O_NONBLOCK : 0);
3070 	/*
3071 	 * When mounting SMB root file systems, we do not want to block in
3072 	 * connect. Otherwise bail out and then let cifs_reconnect() perform
3073 	 * reconnect failover - if possible.
3074 	 */
3075 	if (server->noblockcnt && rc == -EINPROGRESS)
3076 		rc = 0;
3077 	if (rc < 0) {
3078 		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
3079 		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
3080 		sock_release(socket);
3081 		server->ssocket = NULL;
3082 		return rc;
3083 	}
3084 	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
3085 	if (sport == htons(RFC1001_PORT))
3086 		rc = ip_rfc1001_connect(server);
3087 
3088 	return rc;
3089 }
3090 
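/*
 * If no port was specified, try direct SMB over TCP (port 445, CIFS_PORT)
 * first and fall back to the NetBIOS session service (port 139,
 * RFC1001_PORT), which additionally requires the RFC1001 session request
 * sent by ip_rfc1001_connect().
 */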
3091 static int
3092 ip_connect(struct TCP_Server_Info *server)
3093 {
3094 	__be16 *sport;
3095 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
3096 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
3097 
3098 	if (server->dstaddr.ss_family == AF_INET6)
3099 		sport = &addr6->sin6_port;
3100 	else
3101 		sport = &addr->sin_port;
3102 
3103 	if (*sport == 0) {
3104 		int rc;
3105 
3106 		/* try with 445 port at first */
3107 		*sport = htons(CIFS_PORT);
3108 
3109 		rc = generic_ip_connect(server);
3110 		if (rc >= 0)
3111 			return rc;
3112 
3113 		/* if it failed, try with 139 port */
3114 		*sport = htons(RFC1001_PORT);
3115 	}
3116 
3117 	return generic_ip_connect(server);
3118 }
3119 
3120 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3121 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
3122 			  struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3123 {
3124 	/*
3125 	 * If we are reconnecting, should we check whether any requested
3126 	 * capabilities have changed locally, e.g. via remount?  We cannot
3127 	 * do much about it here even if we could detect it by the code
3128 	 * below.  Perhaps we could add a backpointer from the tcon to an
3129 	 * array of superblocks, or make all mounts of the same share use
3130 	 * one superblock as NFS does - then we would only have a single
3131 	 * backpointer to the sb.
3132 	 * What if we wanted to mount the server share twice, once with
3133 	 * and once without posixacls or posix paths?
3134 	 */
3135 	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3136 
3137 	if (ctx && ctx->no_linux_ext) {
3138 		tcon->fsUnixInfo.Capability = 0;
3139 		tcon->unix_ext = 0; /* Unix Extensions disabled */
3140 		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
3141 		return;
3142 	} else if (ctx)
3143 		tcon->unix_ext = 1; /* Unix Extensions supported */
3144 
3145 	if (!tcon->unix_ext) {
3146 		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
3147 		return;
3148 	}
3149 
3150 	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
3151 		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3152 		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
3153 		/*
3154 		 * check for reconnect case in which we do not
3155 		 * want to change the mount behavior if we can avoid it
3156 		 */
3157 		if (ctx == NULL) {
3158 			/*
3159 			 * turn off POSIX ACL and PATHNAMES if not set
3160 			 * originally at mount time
3161 			 */
3162 			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
3163 				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
3164 			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
3165 				if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
3166 					cifs_dbg(VFS, "POSIXPATH support change\n");
3167 				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
3168 			} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
3169 				cifs_dbg(VFS, "possible reconnect error\n");
3170 				cifs_dbg(VFS, "server disabled POSIX path support\n");
3171 			}
3172 		}
3173 
3174 		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
3175 			cifs_dbg(VFS, "per-share encryption not supported yet\n");
3176 
3177 		cap &= CIFS_UNIX_CAP_MASK;
3178 		if (ctx && ctx->no_psx_acl)
3179 			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
3180 		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
3181 			cifs_dbg(FYI, "negotiated posix acl support\n");
3182 			if (cifs_sb)
3183 				cifs_sb->mnt_cifs_flags |=
3184 					CIFS_MOUNT_POSIXACL;
3185 		}
3186 
3187 		if (ctx && ctx->posix_paths == 0)
3188 			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
3189 		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
3190 			cifs_dbg(FYI, "negotiate posix pathnames\n");
3191 			if (cifs_sb)
3192 				cifs_sb->mnt_cifs_flags |=
3193 					CIFS_MOUNT_POSIX_PATHS;
3194 		}
3195 
3196 		cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
3197 #ifdef CONFIG_CIFS_DEBUG2
3198 		if (cap & CIFS_UNIX_FCNTL_CAP)
3199 			cifs_dbg(FYI, "FCNTL cap\n");
3200 		if (cap & CIFS_UNIX_EXTATTR_CAP)
3201 			cifs_dbg(FYI, "EXTATTR cap\n");
3202 		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
3203 			cifs_dbg(FYI, "POSIX path cap\n");
3204 		if (cap & CIFS_UNIX_XATTR_CAP)
3205 			cifs_dbg(FYI, "XATTR cap\n");
3206 		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
3207 			cifs_dbg(FYI, "POSIX ACL cap\n");
3208 		if (cap & CIFS_UNIX_LARGE_READ_CAP)
3209 			cifs_dbg(FYI, "very large read cap\n");
3210 		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
3211 			cifs_dbg(FYI, "very large write cap\n");
3212 		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
3213 			cifs_dbg(FYI, "transport encryption cap\n");
3214 		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
3215 			cifs_dbg(FYI, "mandatory transport encryption cap\n");
3216 #endif /* CIFS_DEBUG2 */
3217 		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
3218 			if (ctx == NULL)
3219 				cifs_dbg(FYI, "resetting capabilities failed\n");
3220 			else
3221 				cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
3222 
3223 		}
3224 	}
3225 }
3226 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3227 
3228 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
3229 {
3230 	struct smb3_fs_context *ctx = cifs_sb->ctx;
3231 
3232 	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
3233 
3234 	spin_lock_init(&cifs_sb->tlink_tree_lock);
3235 	cifs_sb->tlink_tree = RB_ROOT;
3236 
3237 	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
3238 		 ctx->file_mode, ctx->dir_mode);
3239 
3240 	/* this is needed for ASCII codepage to Unicode conversions */
3241 	if (ctx->iocharset == NULL) {
3242 		/* load_nls_default cannot return null */
3243 		cifs_sb->local_nls = load_nls_default();
3244 	} else {
3245 		cifs_sb->local_nls = load_nls(ctx->iocharset);
3246 		if (cifs_sb->local_nls == NULL) {
3247 			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
3248 				 ctx->iocharset);
3249 			return -ELIBACC;
3250 		}
3251 	}
3252 	ctx->local_nls = cifs_sb->local_nls;
3253 
3254 	smb3_update_mnt_flags(cifs_sb);
3255 
3256 	if (ctx->direct_io)
3257 		cifs_dbg(FYI, "mounting share using direct i/o\n");
3258 	if (ctx->cache_ro) {
3259 		cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
3260 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
3261 	} else if (ctx->cache_rw) {
3262 		cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
3263 		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
3264 					    CIFS_MOUNT_RW_CACHE);
3265 	}
3266 
3267 	if ((ctx->cifs_acl) && (ctx->dynperm))
3268 		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
3269 
3270 	if (ctx->prepath) {
3271 		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
3272 		if (cifs_sb->prepath == NULL)
3273 			return -ENOMEM;
3274 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3275 	}
3276 
3277 	return 0;
3278 }
3279 
3280 /* Release all successfully established connections */
3281 static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
3282 {
3283 	int rc = 0;
3284 
3285 	if (mnt_ctx->tcon)
3286 		cifs_put_tcon(mnt_ctx->tcon);
3287 	else if (mnt_ctx->ses)
3288 		cifs_put_smb_ses(mnt_ctx->ses);
3289 	else if (mnt_ctx->server)
3290 		cifs_put_tcp_session(mnt_ctx->server, 0);
3291 	mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
3292 	free_xid(mnt_ctx->xid);
3293 }
3294 
3295 /* Get connections for tcp, ses and tcon */
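/*
 * Mount-time connection setup, in order: get (or create) the TCP session,
 * then the SMB session on top of it, then the tree connection (tcon) for
 * the share, and finally clamp rsize/wsize to what the server negotiated.
 * Each step takes its own reference; on error the caller releases the
 * partial state via mount_put_conns().
 */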
3296 static int mount_get_conns(struct mount_ctx *mnt_ctx)
3297 {
3298 	int rc = 0;
3299 	struct TCP_Server_Info *server = NULL;
3300 	struct cifs_ses *ses = NULL;
3301 	struct cifs_tcon *tcon = NULL;
3302 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3303 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3304 	unsigned int xid;
3305 
3306 	xid = get_xid();
3307 
3308 	/* get a reference to a tcp session */
3309 	server = cifs_get_tcp_session(ctx, NULL);
3310 	if (IS_ERR(server)) {
3311 		rc = PTR_ERR(server);
3312 		server = NULL;
3313 		goto out;
3314 	}
3315 
3316 	/* get a reference to a SMB session */
3317 	ses = cifs_get_smb_ses(server, ctx);
3318 	if (IS_ERR(ses)) {
3319 		rc = PTR_ERR(ses);
3320 		ses = NULL;
3321 		goto out;
3322 	}
3323 
3324 	if ((ctx->persistent == true) && (!(ses->server->capabilities &
3325 					    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
3326 		cifs_server_dbg(VFS, "persistent handles not supported by server\n");
3327 		rc = -EOPNOTSUPP;
3328 		goto out;
3329 	}
3330 
3331 	/* search for existing tcon to this server share */
3332 	tcon = cifs_get_tcon(ses, ctx);
3333 	if (IS_ERR(tcon)) {
3334 		rc = PTR_ERR(tcon);
3335 		tcon = NULL;
3336 		goto out;
3337 	}
3338 
3339 	/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
3340 	if (tcon->posix_extensions)
3341 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
3342 
3343 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3344 	/* tell server which Unix caps we support */
3345 	if (cap_unix(tcon->ses)) {
3346 		/*
3347 		 * The caps reset checks the mount options to see if unix
3348 		 * extensions were disabled for just this mount.
3349 		 */
3350 		reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
3351 		spin_lock(&tcon->ses->server->srv_lock);
3352 		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3353 		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3354 		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
3355 			spin_unlock(&tcon->ses->server->srv_lock);
3356 			rc = -EACCES;
3357 			goto out;
3358 		}
3359 		spin_unlock(&tcon->ses->server->srv_lock);
3360 	} else
3361 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3362 		tcon->unix_ext = 0; /* server does not support them */
3363 
3364 	/* do not care if the following call succeeds - informational */
3365 	if (!tcon->pipe && server->ops->qfs_tcon) {
3366 		server->ops->qfs_tcon(xid, tcon, cifs_sb);
3367 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
3368 			if (tcon->fsDevInfo.DeviceCharacteristics &
3369 			    cpu_to_le32(FILE_READ_ONLY_DEVICE))
3370 				cifs_dbg(VFS, "mounted to read only share\n");
3371 			else if ((cifs_sb->mnt_cifs_flags &
3372 				  CIFS_MOUNT_RW_CACHE) == 0)
3373 				cifs_dbg(VFS, "read only mount of RW share\n");
3374 			/* no need to log a RW mount of a typical RW share */
3375 		}
3376 	}
3377 
3378 	/*
3379 	 * Clamp the rsize/wsize mount arguments if they are too big for the server
3380 	 * and set the rsize/wsize to the negotiated values if not passed in by
3381 	 * the user on mount
3382 	 */
3383 	if ((cifs_sb->ctx->wsize == 0) ||
3384 	    (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
3385 		cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
3386 	if ((cifs_sb->ctx->rsize == 0) ||
3387 	    (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
3388 		cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
3389 
3390 	/*
3391 	 * The cookie is initialized from volume info returned above.
3392 	 * Inside cifs_fscache_get_super_cookie it checks
3393 	 * that we do not get the super cookie twice.
3394 	 */
3395 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
3396 		cifs_fscache_get_super_cookie(tcon);
3397 
3398 out:
3399 	mnt_ctx->server = server;
3400 	mnt_ctx->ses = ses;
3401 	mnt_ctx->tcon = tcon;
3402 	mnt_ctx->xid = xid;
3403 
3404 	return rc;
3405 }
3406 
3407 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
3408 			     struct cifs_tcon *tcon)
3409 {
3410 	struct tcon_link *tlink;
3411 
3412 	/* hang the tcon off of the superblock */
3413 	tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3414 	if (tlink == NULL)
3415 		return -ENOMEM;
3416 
3417 	tlink->tl_uid = ses->linux_uid;
3418 	tlink->tl_tcon = tcon;
3419 	tlink->tl_time = jiffies;
3420 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
3421 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3422 
3423 	cifs_sb->master_tlink = tlink;
3424 	spin_lock(&cifs_sb->tlink_tree_lock);
3425 	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3426 	spin_unlock(&cifs_sb->tlink_tree_lock);
3427 
3428 	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
3429 				TLINK_IDLE_EXPIRE);
3430 	return 0;
3431 }
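/*
 * Note on usage (sketch): the tlink created above is the "master" tlink for
 * the superblock.  For non-multiuser mounts cifs_sb_tlink() hands it back
 * directly; multiuser mounts add per-uid tlinks beside it in the same rbtree,
 * and the prune_tlinks work queued here only ever reaps those non-master,
 * idle entries.
 */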
3432 
3433 #ifdef CONFIG_CIFS_DFS_UPCALL
3434 /* Get unique dfs connections */
3435 static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
3436 {
3437 	int rc;
3438 
3439 	mnt_ctx->fs_ctx->nosharesock = true;
3440 	rc = mount_get_conns(mnt_ctx);
3441 	if (mnt_ctx->server) {
3442 		cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
3443 		spin_lock(&mnt_ctx->server->srv_lock);
3444 		mnt_ctx->server->is_dfs_conn = true;
3445 		spin_unlock(&mnt_ctx->server->srv_lock);
3446 	}
3447 	return rc;
3448 }
3449 
3450 /*
3451  * Unlike cifs_build_path_to_root(), this returns the full UNC path to the
3452  * root when we do not yet have an existing connection (tcon).
3453  */
3454 static char *
3455 build_unc_path_to_root(const struct smb3_fs_context *ctx,
3456 		       const struct cifs_sb_info *cifs_sb, bool useppath)
3457 {
3458 	char *full_path, *pos;
3459 	unsigned int pplen = useppath && ctx->prepath ?
3460 		strlen(ctx->prepath) + 1 : 0;
3461 	unsigned int unc_len = strnlen(ctx->UNC, MAX_TREE_SIZE + 1);
3462 
3463 	if (unc_len > MAX_TREE_SIZE)
3464 		return ERR_PTR(-EINVAL);
3465 
3466 	full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
3467 	if (full_path == NULL)
3468 		return ERR_PTR(-ENOMEM);
3469 
3470 	memcpy(full_path, ctx->UNC, unc_len);
3471 	pos = full_path + unc_len;
3472 
3473 	if (pplen) {
3474 		*pos = CIFS_DIR_SEP(cifs_sb);
3475 		memcpy(pos + 1, ctx->prepath, pplen);
3476 		pos += pplen;
3477 	}
3478 
3479 	*pos = '\0'; /* add trailing null */
3480 	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
3481 	cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
3482 	return full_path;
3483 }
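/*
 * Illustrative example (hypothetical values): with ctx->UNC = "\\srv\share",
 * ctx->prepath = "a/b", useppath == true and '\' as the directory separator,
 * the function returns "\\srv\share\a\b" (convert_delimiter() also rewrites
 * the '/' inside the prepath).
 */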
3484 
3485 /*
3486  * expand_dfs_referral - Update cifs_sb from dfs referral path
3487  *
3488  * On success, cifs_sb->ctx->mount_options is replaced with a newly allocated string containing the
3489  * updated options for the submount.  On failure, the old string is freed and the field is set to NULL.
3490  */
3491 static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
3492 			       struct dfs_info3_param *referral)
3493 {
3494 	int rc;
3495 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3496 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3497 	char *fake_devname = NULL, *mdata = NULL;
3498 
3499 	mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
3500 					   &fake_devname);
3501 	if (IS_ERR(mdata)) {
3502 		rc = PTR_ERR(mdata);
3503 		mdata = NULL;
3504 	} else {
3505 		/*
3506 		 * We can not clear out the whole structure since we no longer have an explicit
3507 		 * function to parse a mount-string. Instead we need to clear out the individual
3508 		 * fields that are no longer valid.
3509 		 */
3510 		kfree(ctx->prepath);
3511 		ctx->prepath = NULL;
3512 		rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
3513 	}
3514 	kfree(fake_devname);
3515 	kfree(cifs_sb->ctx->mount_options);
3516 	cifs_sb->ctx->mount_options = mdata;
3517 
3518 	return rc;
3519 }
3520 #endif
3521 
3522 /* TODO: all callers to this are broken.  We are not parsing mount_options here,
3523  * so we should probably pass a clone of the original context instead?
3524  */
3525 int
3526 cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
3527 {
3528 	int rc;
3529 
3530 	if (devname) {
3531 		cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
3532 		rc = smb3_parse_devname(devname, ctx);
3533 		if (rc) {
3534 			cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
3535 			return rc;
3536 		}
3537 	}
3538 
3539 	if (mntopts) {
3540 		char *ip;
3541 
3542 		rc = smb3_parse_opt(mntopts, "ip", &ip);
3543 		if (rc) {
3544 			cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
3545 			return rc;
3546 		}
3547 
3548 		rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
3549 		kfree(ip);
3550 		if (!rc) {
3551 			cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
3552 			return -EINVAL;
3553 		}
3554 	}
3555 
3556 	if (ctx->nullauth) {
3557 		cifs_dbg(FYI, "Anonymous login\n");
3558 		kfree(ctx->username);
3559 		ctx->username = NULL;
3560 	} else if (ctx->username) {
3561 		/* BB fixme parse for domain name here */
3562 		cifs_dbg(FYI, "Username: %s\n", ctx->username);
3563 	} else {
3564 		cifs_dbg(VFS, "No username specified\n");
3565 	/* In the userspace mount helper we can get the user name from alternate
3566 	   locations such as env variables and files on disk */
3567 		return -EINVAL;
3568 	}
3569 
3570 	return 0;
3571 }
3572 
3573 static int
3574 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
3575 					unsigned int xid,
3576 					struct cifs_tcon *tcon,
3577 					struct cifs_sb_info *cifs_sb,
3578 					char *full_path,
3579 					int added_treename)
3580 {
3581 	int rc;
3582 	char *s;
3583 	char sep, tmp;
3584 	int skip = added_treename ? 1 : 0;
3585 
3586 	sep = CIFS_DIR_SEP(cifs_sb);
3587 	s = full_path;
3588 
3589 	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
3590 	while (rc == 0) {
3591 		/* skip separators */
3592 		while (*s == sep)
3593 			s++;
3594 		if (!*s)
3595 			break;
3596 		/* next separator */
3597 		while (*s && *s != sep)
3598 			s++;
3599 		/*
3600 		 * if the treename is added, we then have to skip the first
3601 		 * part within the separators
3602 		 */
3603 		if (skip) {
3604 			skip = 0;
3605 			continue;
3606 		}
3607 		/*
3608 		 * temporarily null-terminate the path at the end of
3609 		 * the current component
3610 		 */
3611 		tmp = *s;
3612 		*s = 0;
3613 		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3614 						     full_path);
3615 		*s = tmp;
3616 	}
3617 	return rc;
3618 }
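/*
 * Illustrative example (hypothetical path): for full_path = "\dir1\dir2" and
 * added_treename == 0, the loop above probes "", "\dir1" and "\dir1\dir2" in
 * turn; with added_treename != 0 the first component between separators (the
 * tree name) is skipped before probing resumes.
 */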
3619 
3620 /*
3621  * Check if path is remote (i.e. a DFS share).
3622  *
3623  * Return -EREMOTE if it is, otherwise 0 or -errno.
3624  */
3625 static int is_path_remote(struct mount_ctx *mnt_ctx)
3626 {
3627 	int rc;
3628 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3629 	struct TCP_Server_Info *server = mnt_ctx->server;
3630 	unsigned int xid = mnt_ctx->xid;
3631 	struct cifs_tcon *tcon = mnt_ctx->tcon;
3632 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3633 	char *full_path;
3634 
3635 	if (!server->ops->is_path_accessible)
3636 		return -EOPNOTSUPP;
3637 
3638 	/*
3639 	 * cifs_build_path_to_root works only when we have a valid tcon
3640 	 */
3641 	full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3642 					    tcon->Flags & SMB_SHARE_IS_IN_DFS);
3643 	if (full_path == NULL)
3644 		return -ENOMEM;
3645 
3646 	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
3647 
3648 	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3649 					     full_path);
3650 	if (rc != 0 && rc != -EREMOTE)
3651 		goto out;
3652 
3653 	if (rc != -EREMOTE) {
3654 		rc = cifs_are_all_path_components_accessible(server, xid, tcon,
3655 			cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3656 		if (rc != 0) {
3657 			cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
3658 			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3659 			rc = 0;
3660 		}
3661 	}
3662 
3663 out:
3664 	kfree(full_path);
3665 	return rc;
3666 }
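/*
 * Note (sketch): -EREMOTE here corresponds to the server reporting
 * STATUS_PATH_NOT_COVERED for the path, i.e. it is really a DFS link; in that
 * case cifs_mount() below chases the referral via follow_dfs_link().
 */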
3667 
3668 #ifdef CONFIG_CIFS_DFS_UPCALL
3669 static void set_root_ses(struct mount_ctx *mnt_ctx)
3670 {
3671 	if (mnt_ctx->ses) {
3672 		spin_lock(&cifs_tcp_ses_lock);
3673 		mnt_ctx->ses->ses_count++;
3674 		spin_unlock(&cifs_tcp_ses_lock);
3675 		dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
3676 	}
3677 	mnt_ctx->root_ses = mnt_ctx->ses;
3678 }
3679 
3680 static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
3681 {
3682 	int rc;
3683 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3684 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3685 
3686 	*isdfs = true;
3687 
3688 	rc = mount_get_conns(mnt_ctx);
3689 	/*
3690 	 * If called with 'nodfs' mount option, then skip DFS resolving.  Otherwise unconditionally
3691 	 * try to get a DFS referral (even a cached one) to determine whether it is a DFS mount.
3692 	 *
3693 	 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
3694 	 * to respond with PATH_NOT_COVERED to requests that include the prefix.
3695 	 */
3696 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
3697 	    dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
3698 			   ctx->UNC + 1, NULL, root_tl)) {
3699 		if (rc)
3700 			return rc;
3701 		/* Check if it is fully accessible and then mount it */
3702 		rc = is_path_remote(mnt_ctx);
3703 		if (!rc)
3704 			*isdfs = false;
3705 		else if (rc != -EREMOTE)
3706 			return rc;
3707 	}
3708 	return 0;
3709 }
3710 
3711 static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
3712 			      const char *ref_path, struct dfs_cache_tgt_iterator *tit)
3713 {
3714 	int rc;
3715 	struct dfs_info3_param ref = {};
3716 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3717 	char *oldmnt = cifs_sb->ctx->mount_options;
3718 
3719 	cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
3720 		 dfs_cache_get_tgt_name(tit));
3721 
3722 	rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
3723 	if (rc)
3724 		goto out;
3725 
3726 	rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
3727 	if (rc)
3728 		goto out;
3729 
3730 	/* Connect to new target only if we were redirected (e.g. mount options changed) */
3731 	if (oldmnt != cifs_sb->ctx->mount_options) {
3732 		mount_put_conns(mnt_ctx);
3733 		rc = mount_get_dfs_conns(mnt_ctx);
3734 	}
3735 	if (!rc) {
3736 		if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
3737 			set_root_ses(mnt_ctx);
3738 		rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
3739 					      cifs_remap(cifs_sb), ref_path, tit);
3740 	}
3741 
3742 out:
3743 	free_dfs_info_param(&ref);
3744 	return rc;
3745 }
3746 
3747 static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
3748 {
3749 	int rc;
3750 	char *full_path;
3751 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3752 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3753 	struct dfs_cache_tgt_iterator *tit;
3754 
3755 	/* Put initial connections as they might be shared with other mounts.  We need unique dfs
3756 	 * connections per mount to properly failover, so mount_get_dfs_conns() must be used from
3757 	 * now on.
3758 	 */
3759 	mount_put_conns(mnt_ctx);
3760 	mount_get_dfs_conns(mnt_ctx);
3761 	set_root_ses(mnt_ctx);
3762 
3763 	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
3764 	if (IS_ERR(full_path))
3765 		return PTR_ERR(full_path);
3766 
3767 	mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
3768 							    cifs_remap(cifs_sb));
3769 	if (IS_ERR(mnt_ctx->origin_fullpath)) {
3770 		rc = PTR_ERR(mnt_ctx->origin_fullpath);
3771 		mnt_ctx->origin_fullpath = NULL;
3772 		goto out;
3773 	}
3774 
3775 	/* Try all dfs root targets */
3776 	for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
3777 	     tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
3778 		rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
3779 		if (!rc) {
3780 			mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
3781 			if (!mnt_ctx->leaf_fullpath)
3782 				rc = -ENOMEM;
3783 			break;
3784 		}
3785 	}
3786 
3787 out:
3788 	kfree(full_path);
3789 	return rc;
3790 }
3791 
3792 static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
3793 {
3794 	int rc;
3795 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3796 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3797 	char *full_path;
3798 	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
3799 	struct dfs_cache_tgt_iterator *tit;
3800 
3801 	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
3802 	if (IS_ERR(full_path))
3803 		return PTR_ERR(full_path);
3804 
3805 	kfree(mnt_ctx->leaf_fullpath);
3806 	mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
3807 							  cifs_remap(cifs_sb));
3808 	if (IS_ERR(mnt_ctx->leaf_fullpath)) {
3809 		rc = PTR_ERR(mnt_ctx->leaf_fullpath);
3810 		mnt_ctx->leaf_fullpath = NULL;
3811 		goto out;
3812 	}
3813 
3814 	/* Get referral from dfs link */
3815 	rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
3816 			    cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
3817 	if (rc)
3818 		goto out;
3819 
3820 	/* Try all dfs link targets.  If an I/O fails from the currently connected DFS target with an
3821 	 * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
3822 	 * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
3823 	 * STATUS_PATH_NOT_COVERED."
3824 	 */
3825 	for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
3826 	     tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
3827 		rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
3828 		if (!rc) {
3829 			rc = is_path_remote(mnt_ctx);
3830 			if (!rc || rc == -EREMOTE)
3831 				break;
3832 		}
3833 	}
3834 
3835 out:
3836 	kfree(full_path);
3837 	dfs_cache_free_tgts(&tl);
3838 	return rc;
3839 }
3840 
3841 static int follow_dfs_link(struct mount_ctx *mnt_ctx)
3842 {
3843 	int rc;
3844 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3845 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3846 	char *full_path;
3847 	int num_links = 0;
3848 
3849 	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
3850 	if (IS_ERR(full_path))
3851 		return PTR_ERR(full_path);
3852 
3853 	kfree(mnt_ctx->origin_fullpath);
3854 	mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
3855 							    cifs_remap(cifs_sb));
3856 	kfree(full_path);
3857 
3858 	if (IS_ERR(mnt_ctx->origin_fullpath)) {
3859 		rc = PTR_ERR(mnt_ctx->origin_fullpath);
3860 		mnt_ctx->origin_fullpath = NULL;
3861 		return rc;
3862 	}
3863 
3864 	do {
3865 		rc = __follow_dfs_link(mnt_ctx);
3866 		if (!rc || rc != -EREMOTE)
3867 			break;
3868 	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
3869 
3870 	return rc;
3871 }
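/*
 * Sketch of the loop above: each __follow_dfs_link() pass resolves one level
 * of DFS link.  A return of -EREMOTE means the resolved target is itself
 * another link, so the walk repeats, at most MAX_NESTED_LINKS times; running
 * out of that budget leaves rc == -ELOOP.
 */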
3872 
3873 /* Set up DFS referral paths for failover */
3874 static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
3875 {
3876 	struct TCP_Server_Info *server = mnt_ctx->server;
3877 
3878 	mutex_lock(&server->refpath_lock);
3879 	server->origin_fullpath = mnt_ctx->origin_fullpath;
3880 	server->leaf_fullpath = mnt_ctx->leaf_fullpath;
3881 	server->current_fullpath = mnt_ctx->leaf_fullpath;
3882 	mutex_unlock(&server->refpath_lock);
3883 	mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
3884 }
3885 
3886 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3887 {
3888 	int rc;
3889 	struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3890 	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
3891 	bool isdfs;
3892 
3893 	rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
3894 	if (rc)
3895 		goto error;
3896 	if (!isdfs)
3897 		goto out;
3898 
3899 	/* proceed as DFS mount */
3900 	uuid_gen(&mnt_ctx.mount_id);
3901 	rc = connect_dfs_root(&mnt_ctx, &tl);
3902 	dfs_cache_free_tgts(&tl);
3903 
3904 	if (rc)
3905 		goto error;
3906 
3907 	rc = is_path_remote(&mnt_ctx);
3908 	if (rc)
3909 		rc = follow_dfs_link(&mnt_ctx);
3910 	if (rc)
3911 		goto error;
3912 
3913 	setup_server_referral_paths(&mnt_ctx);
3914 	/*
3915 	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
3916 	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
3917 	 */
3918 	cifs_autodisable_serverino(cifs_sb);
3919 	/*
3920 	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
3921 	 * that have different prefix paths.
3922 	 */
3923 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3924 	kfree(cifs_sb->prepath);
3925 	cifs_sb->prepath = ctx->prepath;
3926 	ctx->prepath = NULL;
3927 	uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
3928 
3929 out:
3930 	cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
3931 	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3932 	if (rc)
3933 		goto error;
3934 
3935 	free_xid(mnt_ctx.xid);
3936 	return rc;
3937 
3938 error:
3939 	dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
3940 	kfree(mnt_ctx.origin_fullpath);
3941 	kfree(mnt_ctx.leaf_fullpath);
3942 	mount_put_conns(&mnt_ctx);
3943 	return rc;
3944 }
3945 #else
3946 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3947 {
3948 	int rc = 0;
3949 	struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3950 
3951 	rc = mount_get_conns(&mnt_ctx);
3952 	if (rc)
3953 		goto error;
3954 
3955 	if (mnt_ctx.tcon) {
3956 		rc = is_path_remote(&mnt_ctx);
3957 		if (rc == -EREMOTE)
3958 			rc = -EOPNOTSUPP;
3959 		if (rc)
3960 			goto error;
3961 	}
3962 
3963 	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3964 	if (rc)
3965 		goto error;
3966 
3967 	free_xid(mnt_ctx.xid);
3968 	return rc;
3969 
3970 error:
3971 	mount_put_conns(&mnt_ctx);
3972 	return rc;
3973 }
3974 #endif
3975 
3976 /*
3977  * Issue a TREE_CONNECT request.
3978  */
3979 int
3980 CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
3981 	 const char *tree, struct cifs_tcon *tcon,
3982 	 const struct nls_table *nls_codepage)
3983 {
3984 	struct smb_hdr *smb_buffer;
3985 	struct smb_hdr *smb_buffer_response;
3986 	TCONX_REQ *pSMB;
3987 	TCONX_RSP *pSMBr;
3988 	unsigned char *bcc_ptr;
3989 	int rc = 0;
3990 	int length;
3991 	__u16 bytes_left, count;
3992 
3993 	if (ses == NULL)
3994 		return -EIO;
3995 
3996 	smb_buffer = cifs_buf_get();
3997 	if (smb_buffer == NULL)
3998 		return -ENOMEM;
3999 
4000 	smb_buffer_response = smb_buffer;
4001 
4002 	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
4003 			NULL /*no tid */ , 4 /*wct */ );
4004 
4005 	smb_buffer->Mid = get_next_mid(ses->server);
4006 	smb_buffer->Uid = ses->Suid;
4007 	pSMB = (TCONX_REQ *) smb_buffer;
4008 	pSMBr = (TCONX_RSP *) smb_buffer_response;
4009 
4010 	pSMB->AndXCommand = 0xFF;
4011 	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
4012 	bcc_ptr = &pSMB->Password[0];
4013 
4014 	pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
4015 	*bcc_ptr = 0; /* password is null byte */
4016 	bcc_ptr++;              /* skip password */
4017 	/* already aligned so no need to do it below */
4018 
4019 	if (ses->server->sign)
4020 		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
4021 
4022 	if (ses->capabilities & CAP_STATUS32) {
4023 		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
4024 	}
4025 	if (ses->capabilities & CAP_DFS) {
4026 		smb_buffer->Flags2 |= SMBFLG2_DFS;
4027 	}
4028 	if (ses->capabilities & CAP_UNICODE) {
4029 		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
4030 		length =
4031 		    cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
4032 			6 /* max utf8 char length in bytes */ *
4033 			(/* server len*/ + 256 /* share len */), nls_codepage);
4034 		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
4035 		bcc_ptr += 2;	/* skip trailing null */
4036 	} else {		/* ASCII */
4037 		strcpy(bcc_ptr, tree);
4038 		bcc_ptr += strlen(tree) + 1;
4039 	}
4040 	strcpy(bcc_ptr, "?????");
4041 	bcc_ptr += strlen("?????");
4042 	bcc_ptr += 1;
4043 	count = bcc_ptr - &pSMB->Password[0];
4044 	be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
4045 	pSMB->ByteCount = cpu_to_le16(count);
4046 
4047 	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
4048 			 0);
4049 
4050 	/* above now done in SendReceive */
4051 	if (rc == 0) {
4052 		bool is_unicode;
4053 
4054 		tcon->tid = smb_buffer_response->Tid;
4055 		bcc_ptr = pByteArea(smb_buffer_response);
4056 		bytes_left = get_bcc(smb_buffer_response);
4057 		length = strnlen(bcc_ptr, bytes_left - 2);
4058 		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
4059 			is_unicode = true;
4060 		else
4061 			is_unicode = false;
4062 
4063 
4064 		/* skip service field (NB: this field is always ASCII) */
4065 		if (length == 3) {
4066 			if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
4067 			    (bcc_ptr[2] == 'C')) {
4068 				cifs_dbg(FYI, "IPC connection\n");
4069 				tcon->ipc = true;
4070 				tcon->pipe = true;
4071 			}
4072 		} else if (length == 2) {
4073 			if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
4074 				/* the most common case */
4075 				cifs_dbg(FYI, "disk share connection\n");
4076 			}
4077 		}
4078 		bcc_ptr += length + 1;
4079 		bytes_left -= (length + 1);
4080 		strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
4081 
4082 		/* mostly informational -- no need to fail on error here */
4083 		kfree(tcon->nativeFileSystem);
4084 		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
4085 						      bytes_left, is_unicode,
4086 						      nls_codepage);
4087 
4088 		cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
4089 
4090 		if ((smb_buffer_response->WordCount == 3) ||
4091 			 (smb_buffer_response->WordCount == 7))
4092 			/* field is in same location */
4093 			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
4094 		else
4095 			tcon->Flags = 0;
4096 		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
4097 	}
4098 
4099 	cifs_buf_release(smb_buffer);
4100 	return rc;
4101 }
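/*
 * Illustrative layout of the byte area built above (hypothetical share, ASCII
 * case): for tree = "\\SRV\SHARE" the BCC holds one NUL password byte, then
 * "\\SRV\SHARE" with its terminating NUL, then the wildcard service string
 * "?????" with its NUL; 'count' covers exactly those bytes and lands in
 * ByteCount.
 */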
4102 
4103 static void delayed_free(struct rcu_head *p)
4104 {
4105 	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
4106 
4107 	unload_nls(cifs_sb->local_nls);
4108 	smb3_cleanup_fs_context(cifs_sb->ctx);
4109 	kfree(cifs_sb);
4110 }
4111 
4112 void
4113 cifs_umount(struct cifs_sb_info *cifs_sb)
4114 {
4115 	struct rb_root *root = &cifs_sb->tlink_tree;
4116 	struct rb_node *node;
4117 	struct tcon_link *tlink;
4118 
4119 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
4120 
4121 	spin_lock(&cifs_sb->tlink_tree_lock);
4122 	while ((node = rb_first(root))) {
4123 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
4124 		cifs_get_tlink(tlink);
4125 		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
4126 		rb_erase(node, root);
4127 
4128 		spin_unlock(&cifs_sb->tlink_tree_lock);
4129 		cifs_put_tlink(tlink);
4130 		spin_lock(&cifs_sb->tlink_tree_lock);
4131 	}
4132 	spin_unlock(&cifs_sb->tlink_tree_lock);
4133 
4134 	kfree(cifs_sb->prepath);
4135 #ifdef CONFIG_CIFS_DFS_UPCALL
4136 	dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
4137 #endif
4138 	call_rcu(&cifs_sb->rcu, delayed_free);
4139 }
4140 
4141 int
4142 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
4143 			struct TCP_Server_Info *server)
4144 {
4145 	int rc = 0;
4146 
4147 	if (!server->ops->need_neg || !server->ops->negotiate)
4148 		return -ENOSYS;
4149 
4150 	/* only send once per connect */
4151 	spin_lock(&server->srv_lock);
4152 	if (server->tcpStatus != CifsGood &&
4153 	    server->tcpStatus != CifsNew &&
4154 	    server->tcpStatus != CifsNeedNegotiate) {
4155 		spin_unlock(&server->srv_lock);
4156 		return -EHOSTDOWN;
4157 	}
4158 
4159 	if (!server->ops->need_neg(server) &&
4160 	    server->tcpStatus == CifsGood) {
4161 		spin_unlock(&server->srv_lock);
4162 		return 0;
4163 	}
4164 
4165 	server->tcpStatus = CifsInNegotiate;
4166 	spin_unlock(&server->srv_lock);
4167 
4168 	rc = server->ops->negotiate(xid, ses, server);
4169 	if (rc == 0) {
4170 		spin_lock(&server->srv_lock);
4171 		if (server->tcpStatus == CifsInNegotiate)
4172 			server->tcpStatus = CifsGood;
4173 		else
4174 			rc = -EHOSTDOWN;
4175 		spin_unlock(&server->srv_lock);
4176 	} else {
4177 		spin_lock(&server->srv_lock);
4178 		if (server->tcpStatus == CifsInNegotiate)
4179 			server->tcpStatus = CifsNeedNegotiate;
4180 		spin_unlock(&server->srv_lock);
4181 	}
4182 
4183 	return rc;
4184 }
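/*
 * Sketch of the tcpStatus transitions driven above:
 *
 *   CifsNew / CifsNeedNegotiate (or CifsGood needing a fresh negotiate)
 *       --> CifsInNegotiate --> CifsGood           (negotiate succeeded)
 *                          \--> CifsNeedNegotiate  (negotiate failed)
 *
 * If another thread changed tcpStatus while the request was in flight, the
 * success path returns -EHOSTDOWN instead of marking the server good.
 */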
4185 
4186 int
4187 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
4188 		   struct TCP_Server_Info *server,
4189 		   struct nls_table *nls_info)
4190 {
4191 	int rc = -ENOSYS;
4192 	struct TCP_Server_Info *pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
4193 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
4194 	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
4195 	bool is_binding = false;
4196 
4197 	spin_lock(&ses->ses_lock);
4198 	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
4199 		 __func__, ses->chans_need_reconnect);
4200 
4201 	if (ses->ses_status != SES_GOOD &&
4202 	    ses->ses_status != SES_NEW &&
4203 	    ses->ses_status != SES_NEED_RECON) {
4204 		spin_unlock(&ses->ses_lock);
4205 		return -EHOSTDOWN;
4206 	}
4207 
4208 	/* only send once per connect */
4209 	spin_lock(&ses->chan_lock);
4210 	if (CIFS_ALL_CHANS_GOOD(ses)) {
4211 		if (ses->ses_status == SES_NEED_RECON)
4212 			ses->ses_status = SES_GOOD;
4213 		spin_unlock(&ses->chan_lock);
4214 		spin_unlock(&ses->ses_lock);
4215 		return 0;
4216 	}
4217 
4218 	cifs_chan_set_in_reconnect(ses, server);
4219 	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
4220 	spin_unlock(&ses->chan_lock);
4221 
4222 	if (!is_binding) {
4223 		ses->ses_status = SES_IN_SETUP;
4224 
4225 		/* force iface_list refresh */
4226 		ses->iface_last_update = 0;
4227 	}
4228 	spin_unlock(&ses->ses_lock);
4229 
4230 	/* update ses ip_addr only for primary chan */
4231 	if (server == pserver) {
4232 		if (server->dstaddr.ss_family == AF_INET6)
4233 			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
4234 		else
4235 			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
4236 	}
4237 
4238 	if (!is_binding) {
4239 		ses->capabilities = server->capabilities;
4240 		if (!linuxExtEnabled)
4241 			ses->capabilities &= (~server->vals->cap_unix);
4242 
4243 		if (ses->auth_key.response) {
4244 			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
4245 				 ses->auth_key.response);
4246 			kfree_sensitive(ses->auth_key.response);
4247 			ses->auth_key.response = NULL;
4248 			ses->auth_key.len = 0;
4249 		}
4250 	}
4251 
4252 	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
4253 		 server->sec_mode, server->capabilities, server->timeAdj);
4254 
4255 	if (server->ops->sess_setup)
4256 		rc = server->ops->sess_setup(xid, ses, server, nls_info);
4257 
4258 	if (rc) {
4259 		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
4260 		spin_lock(&ses->ses_lock);
4261 		if (ses->ses_status == SES_IN_SETUP)
4262 			ses->ses_status = SES_NEED_RECON;
4263 		spin_lock(&ses->chan_lock);
4264 		cifs_chan_clear_in_reconnect(ses, server);
4265 		spin_unlock(&ses->chan_lock);
4266 		spin_unlock(&ses->ses_lock);
4267 	} else {
4268 		spin_lock(&ses->ses_lock);
4269 		if (ses->ses_status == SES_IN_SETUP)
4270 			ses->ses_status = SES_GOOD;
4271 		spin_lock(&ses->chan_lock);
4272 		cifs_chan_clear_in_reconnect(ses, server);
4273 		cifs_chan_clear_need_reconnect(ses, server);
4274 		spin_unlock(&ses->chan_lock);
4275 		spin_unlock(&ses->ses_lock);
4276 	}
4277 
4278 	return rc;
4279 }
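/*
 * Sketch of the session states handled above: SES_NEW / SES_NEED_RECON enter
 * SES_IN_SETUP (unless this is a channel-binding setup for an already
 * established session); a successful sess_setup() then moves the session to
 * SES_GOOD and clears this channel's need-reconnect bit, while a failure
 * drops it back to SES_NEED_RECON.  The channel's in-reconnect flag is
 * cleared on both paths.
 */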
4280 
4281 static int
4282 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
4283 {
4284 	ctx->sectype = ses->sectype;
4285 
4286 	/* krb5 is special, since we don't need username or pw */
4287 	if (ctx->sectype == Kerberos)
4288 		return 0;
4289 
4290 	return cifs_set_cifscreds(ctx, ses);
4291 }
4292 
4293 static struct cifs_tcon *
4294 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
4295 {
4296 	int rc;
4297 	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
4298 	struct cifs_ses *ses;
4299 	struct cifs_tcon *tcon = NULL;
4300 	struct smb3_fs_context *ctx;
4301 
4302 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
4303 	if (ctx == NULL)
4304 		return ERR_PTR(-ENOMEM);
4305 
4306 	ctx->local_nls = cifs_sb->local_nls;
4307 	ctx->linux_uid = fsuid;
4308 	ctx->cred_uid = fsuid;
4309 	ctx->UNC = master_tcon->tree_name;
4310 	ctx->retry = master_tcon->retry;
4311 	ctx->nocase = master_tcon->nocase;
4312 	ctx->nohandlecache = master_tcon->nohandlecache;
4313 	ctx->local_lease = master_tcon->local_lease;
4314 	ctx->no_lease = master_tcon->no_lease;
4315 	ctx->resilient = master_tcon->use_resilient;
4316 	ctx->persistent = master_tcon->use_persistent;
4317 	ctx->handle_timeout = master_tcon->handle_timeout;
4318 	ctx->no_linux_ext = !master_tcon->unix_ext;
4319 	ctx->linux_ext = master_tcon->posix_extensions;
4320 	ctx->sectype = master_tcon->ses->sectype;
4321 	ctx->sign = master_tcon->ses->sign;
4322 	ctx->seal = master_tcon->seal;
4323 	ctx->witness = master_tcon->use_witness;
4324 
4325 	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
4326 	if (rc) {
4327 		tcon = ERR_PTR(rc);
4328 		goto out;
4329 	}
4330 
4331 	/* get a reference for the same TCP session */
4332 	spin_lock(&cifs_tcp_ses_lock);
4333 	++master_tcon->ses->server->srv_count;
4334 	spin_unlock(&cifs_tcp_ses_lock);
4335 
4336 	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
4337 	if (IS_ERR(ses)) {
4338 		tcon = (struct cifs_tcon *)ses;
4339 		cifs_put_tcp_session(master_tcon->ses->server, 0);
4340 		goto out;
4341 	}
4342 
4343 	tcon = cifs_get_tcon(ses, ctx);
4344 	if (IS_ERR(tcon)) {
4345 		cifs_put_smb_ses(ses);
4346 		goto out;
4347 	}
4348 
4349 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
4350 	if (cap_unix(ses))
4351 		reset_cifs_unix_caps(0, tcon, NULL, ctx);
4352 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
4353 
4354 out:
4355 	kfree(ctx->username);
4356 	kfree_sensitive(ctx->password);
4357 	kfree(ctx);
4358 
4359 	return tcon;
4360 }
4361 
4362 struct cifs_tcon *
4363 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
4364 {
4365 	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
4366 }
4367 
4368 /* find and return a tlink with given uid */
4369 static struct tcon_link *
4370 tlink_rb_search(struct rb_root *root, kuid_t uid)
4371 {
4372 	struct rb_node *node = root->rb_node;
4373 	struct tcon_link *tlink;
4374 
4375 	while (node) {
4376 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
4377 
4378 		if (uid_gt(tlink->tl_uid, uid))
4379 			node = node->rb_left;
4380 		else if (uid_lt(tlink->tl_uid, uid))
4381 			node = node->rb_right;
4382 		else
4383 			return tlink;
4384 	}
4385 	return NULL;
4386 }
4387 
4388 /* insert a tcon_link into the tree */
4389 static void
4390 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
4391 {
4392 	struct rb_node **new = &(root->rb_node), *parent = NULL;
4393 	struct tcon_link *tlink;
4394 
4395 	while (*new) {
4396 		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
4397 		parent = *new;
4398 
4399 		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
4400 			new = &((*new)->rb_left);
4401 		else
4402 			new = &((*new)->rb_right);
4403 	}
4404 
4405 	rb_link_node(&new_tlink->tl_rbnode, parent, new);
4406 	rb_insert_color(&new_tlink->tl_rbnode, root);
4407 }
4408 
4409 /*
4410  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4411  * current task.
4412  *
4413  * If the superblock doesn't refer to a multiuser mount, then just return
4414  * the master tcon for the mount.
4415  *
4416  * First, search the rbtree for an existing tcon for this fsuid. If one
4417  * exists, then check to see if it's pending construction. If it is then wait
4418  * for construction to complete. Once it's no longer pending, check to see if
4419  * it failed and either return an error or retry construction, depending on
4420  * the timeout.
4421  *
4422  * If one doesn't exist then insert a new tcon_link struct into the tree and
4423  * try to construct a new one.
4424  */
4425 struct tcon_link *
4426 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
4427 {
4428 	int ret;
4429 	kuid_t fsuid = current_fsuid();
4430 	struct tcon_link *tlink, *newtlink;
4431 
4432 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
4433 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
4434 
4435 	spin_lock(&cifs_sb->tlink_tree_lock);
4436 	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4437 	if (tlink)
4438 		cifs_get_tlink(tlink);
4439 	spin_unlock(&cifs_sb->tlink_tree_lock);
4440 
4441 	if (tlink == NULL) {
4442 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
4443 		if (newtlink == NULL)
4444 			return ERR_PTR(-ENOMEM);
4445 		newtlink->tl_uid = fsuid;
4446 		newtlink->tl_tcon = ERR_PTR(-EACCES);
4447 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
4448 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
4449 		cifs_get_tlink(newtlink);
4450 
4451 		spin_lock(&cifs_sb->tlink_tree_lock);
4452 		/* was one inserted after previous search? */
4453 		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4454 		if (tlink) {
4455 			cifs_get_tlink(tlink);
4456 			spin_unlock(&cifs_sb->tlink_tree_lock);
4457 			kfree(newtlink);
4458 			goto wait_for_construction;
4459 		}
4460 		tlink = newtlink;
4461 		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
4462 		spin_unlock(&cifs_sb->tlink_tree_lock);
4463 	} else {
4464 wait_for_construction:
4465 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
4466 				  TASK_INTERRUPTIBLE);
4467 		if (ret) {
4468 			cifs_put_tlink(tlink);
4469 			return ERR_PTR(-ERESTARTSYS);
4470 		}
4471 
4472 		/* if it's good, return it */
4473 		if (!IS_ERR(tlink->tl_tcon))
4474 			return tlink;
4475 
4476 		/* return error if we tried this already recently */
4477 		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
4478 			cifs_put_tlink(tlink);
4479 			return ERR_PTR(-EACCES);
4480 		}
4481 
4482 		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
4483 			goto wait_for_construction;
4484 	}
4485 
4486 	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
4487 	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
4488 	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
4489 
4490 	if (IS_ERR(tlink->tl_tcon)) {
4491 		cifs_put_tlink(tlink);
4492 		return ERR_PTR(-EACCES);
4493 	}
4494 
4495 	return tlink;
4496 }
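/*
 * Example flow (hypothetical multiuser mount): a task with fsuid 1000 finds
 * no tlink for uid 1000, inserts a TCON_LINK_PENDING placeholder and builds a
 * tcon via cifs_construct_tcon().  A second task with the same fsuid arriving
 * meanwhile finds the placeholder, sleeps in wait_on_bit() until construction
 * finishes, and then shares the resulting tcon (or gets -EACCES if a recent
 * attempt failed).
 */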
4497 
4498 /*
4499  * periodic workqueue job that scans the superblock's tlink tree and closes
4500  * out idle, unreferenced tcons.
4501  */
4502 static void
4503 static void cifs_prune_tlinks(struct work_struct *work)
4504 {
4505 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
4506 						    prune_tlinks.work);
4507 	struct rb_root *root = &cifs_sb->tlink_tree;
4508 	struct rb_node *node;
4509 	struct rb_node *tmp;
4510 	struct tcon_link *tlink;
4511 
4512 	/*
4513 	 * Because we drop the spinlock in the loop in order to put the tlink
4514 	 * it's not guarded against removal of links from the tree. The only
4515 	 * places that remove entries from the tree are this function and
4516 	 * umounts. Because this function is non-reentrant and is canceled
4517 	 * before umount can proceed, this is safe.
4518 	 */
4519 	spin_lock(&cifs_sb->tlink_tree_lock);
4520 	node = rb_first(root);
4521 	while (node != NULL) {
4522 		tmp = node;
4523 		node = rb_next(tmp);
4524 		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
4525 
4526 		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
4527 		    atomic_read(&tlink->tl_count) != 0 ||
4528 		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
4529 			continue;
4530 
4531 		cifs_get_tlink(tlink);
4532 		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
4533 		rb_erase(tmp, root);
4534 
4535 		spin_unlock(&cifs_sb->tlink_tree_lock);
4536 		cifs_put_tlink(tlink);
4537 		spin_lock(&cifs_sb->tlink_tree_lock);
4538 	}
4539 	spin_unlock(&cifs_sb->tlink_tree_lock);
4540 
4541 	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
4542 				TLINK_IDLE_EXPIRE);
4543 }
4544 
4545 #ifdef CONFIG_CIFS_DFS_UPCALL
4546 /* Update dfs referral path of superblock */
4547 static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
4548 				  const char *target)
4549 {
4550 	int rc = 0;
4551 	size_t len = strlen(target);
4552 	char *refpath, *npath;
4553 
4554 	if (unlikely(len < 2 || *target != '\\'))
4555 		return -EINVAL;
4556 
4557 	if (target[1] == '\\') {
4558 		len += 1;
4559 		refpath = kmalloc(len, GFP_KERNEL);
4560 		if (!refpath)
4561 			return -ENOMEM;
4562 
4563 		scnprintf(refpath, len, "%s", target);
4564 	} else {
4565 		len += sizeof("\\");
4566 		refpath = kmalloc(len, GFP_KERNEL);
4567 		if (!refpath)
4568 			return -ENOMEM;
4569 
4570 		scnprintf(refpath, len, "\\%s", target);
4571 	}
4572 
4573 	npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
4574 	kfree(refpath);
4575 
4576 	if (IS_ERR(npath)) {
4577 		rc = PTR_ERR(npath);
4578 	} else {
4579 		mutex_lock(&server->refpath_lock);
4580 		kfree(server->leaf_fullpath);
4581 		server->leaf_fullpath = npath;
4582 		mutex_unlock(&server->refpath_lock);
4583 		server->current_fullpath = server->leaf_fullpath;
4584 	}
4585 	return rc;
4586 }
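/*
 * Illustrative example (hypothetical targets): "\\fs1\share" is copied as-is,
 * while a single-backslash form such as "\fs1\share" gets one more leading
 * backslash before being canonicalized by dfs_cache_canonical_path() and
 * stored as the server's new leaf_fullpath/current_fullpath.
 */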
4587 
4588 static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
4589 				       size_t tcp_host_len, char *share, bool *target_match)
4590 {
4591 	int rc = 0;
4592 	const char *dfs_host;
4593 	size_t dfs_host_len;
4594 
4595 	*target_match = true;
4596 	extract_unc_hostname(share, &dfs_host, &dfs_host_len);
4597 
4598 	/* Check if hostnames or addresses match */
4599 	if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
4600 		cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
4601 			 dfs_host, (int)tcp_host_len, tcp_host);
4602 		rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
4603 		if (rc)
4604 			cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
4605 	}
4606 	return rc;
4607 }
4608 
4609 static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
4610 				     struct cifs_sb_info *cifs_sb, char *tree, bool islink,
4611 				     struct dfs_cache_tgt_list *tl)
4612 {
4613 	int rc;
4614 	struct TCP_Server_Info *server = tcon->ses->server;
4615 	const struct smb_version_operations *ops = server->ops;
4616 	struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
4617 	char *share = NULL, *prefix = NULL;
4618 	const char *tcp_host;
4619 	size_t tcp_host_len;
4620 	struct dfs_cache_tgt_iterator *tit;
4621 	bool target_match;
4622 
4623 	extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
4624 
4625 	tit = dfs_cache_get_tgt_iterator(tl);
4626 	if (!tit) {
4627 		rc = -ENOENT;
4628 		goto out;
4629 	}
4630 
4631 	/* Try to tree connect to all dfs targets */
4632 	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
4633 		const char *target = dfs_cache_get_tgt_name(tit);
4634 		struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
4635 
4636 		kfree(share);
4637 		kfree(prefix);
4638 		share = prefix = NULL;
4639 
4640 		/* Check if share matches with tcp ses */
4641 		rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
4642 		if (rc) {
4643 			cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
4644 			break;
4645 		}
4646 
4647 		rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
4648 						 &target_match);
4649 		if (rc)
4650 			break;
4651 		if (!target_match) {
4652 			rc = -EHOSTUNREACH;
4653 			continue;
4654 		}
4655 
4656 		if (ipc->need_reconnect) {
4657 			scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
4658 			rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
4659 			if (rc)
4660 				break;
4661 		}
4662 
4663 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
4664 		if (!islink) {
4665 			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
4666 			break;
4667 		}
4668 		/*
4669 		 * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
4670 		 * to it.  Otherwise, cache the dfs referral and then mark current tcp ses for
4671 		 * reconnect so either the demultiplex thread or the echo worker will reconnect to
4672 		 * newly resolved target.
4673 		 */
4674 		if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
4675 				   NULL, &ntl)) {
4676 			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
4677 			if (rc)
4678 				continue;
4679 			rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
4680 			if (!rc)
4681 				rc = cifs_update_super_prepath(cifs_sb, prefix);
4682 		} else {
4683 			/* Target is another dfs share */
4684 			rc = update_server_fullpath(server, cifs_sb, target);
4685 			dfs_cache_free_tgts(tl);
4686 
4687 			if (!rc) {
4688 				rc = -EREMOTE;
4689 				list_replace_init(&ntl.tl_list, &tl->tl_list);
4690 			} else
4691 				dfs_cache_free_tgts(&ntl);
4692 		}
4693 		break;
4694 	}
4695 
4696 out:
4697 	kfree(share);
4698 	kfree(prefix);
4699 
4700 	return rc;
4701 }
4702 
4703 static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
4704 				   struct cifs_sb_info *cifs_sb, char *tree, bool islink,
4705 				   struct dfs_cache_tgt_list *tl)
4706 {
4707 	int rc;
4708 	int num_links = 0;
4709 	struct TCP_Server_Info *server = tcon->ses->server;
4710 
4711 	do {
4712 		rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
4713 		if (!rc || rc != -EREMOTE)
4714 			break;
4715 	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
4716 	/*
4717 	 * If we couldn't tree connect to any targets from the last referral path, then
4718 	 * retry from the original referral path.
4719 	 */
4720 	if (rc && server->current_fullpath != server->origin_fullpath) {
4721 		server->current_fullpath = server->origin_fullpath;
4722 		cifs_signal_cifsd_for_reconnect(server, true);
4723 	}
4724 
4725 	dfs_cache_free_tgts(tl);
4726 	return rc;
4727 }
4728 
4729 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4730 {
4731 	int rc;
4732 	struct TCP_Server_Info *server = tcon->ses->server;
4733 	const struct smb_version_operations *ops = server->ops;
4734 	struct super_block *sb = NULL;
4735 	struct cifs_sb_info *cifs_sb;
4736 	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
4737 	char *tree;
4738 	struct dfs_info3_param ref = {0};
4739 
4740 	/* only send once per connect */
4741 	spin_lock(&tcon->tc_lock);
4742 	if (tcon->ses->ses_status != SES_GOOD ||
4743 	    (tcon->status != TID_NEW &&
4744 	    tcon->status != TID_NEED_TCON)) {
4745 		spin_unlock(&tcon->tc_lock);
4746 		return 0;
4747 	}
4748 	tcon->status = TID_IN_TCON;
4749 	spin_unlock(&tcon->tc_lock);
4750 
4751 	tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
4752 	if (!tree) {
4753 		rc = -ENOMEM;
4754 		goto out;
4755 	}
4756 
4757 	if (tcon->ipc) {
4758 		scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
4759 		rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
4760 		goto out;
4761 	}
4762 
4763 	sb = cifs_get_tcp_super(server);
4764 	if (IS_ERR(sb)) {
4765 		rc = PTR_ERR(sb);
4766 		cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
4767 		goto out;
4768 	}
4769 
4770 	cifs_sb = CIFS_SB(sb);
4771 
4772 	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
4773 	if (!server->current_fullpath ||
4774 	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
4775 		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
4776 		goto out;
4777 	}
4778 
4779 	rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
4780 				     &tl);
4781 	free_dfs_info_param(&ref);
4782 
4783 out:
4784 	kfree(tree);
4785 	cifs_put_tcp_super(sb);
4786 
4787 	if (rc) {
4788 		spin_lock(&tcon->tc_lock);
4789 		if (tcon->status == TID_IN_TCON)
4790 			tcon->status = TID_NEED_TCON;
4791 		spin_unlock(&tcon->tc_lock);
4792 	} else {
4793 		spin_lock(&tcon->tc_lock);
4794 		if (tcon->status == TID_IN_TCON)
4795 			tcon->status = TID_GOOD;
4796 		spin_unlock(&tcon->tc_lock);
4797 		tcon->need_reconnect = false;
4798 	}
4799 
4800 	return rc;
4801 }
4802 #else
4803 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4804 {
4805 	int rc;
4806 	const struct smb_version_operations *ops = tcon->ses->server->ops;
4807 
4808 	/* only send once per connect */
4809 	spin_lock(&tcon->tc_lock);
4810 	if (tcon->ses->ses_status != SES_GOOD ||
4811 	    (tcon->status != TID_NEW &&
4812 	    tcon->status != TID_NEED_TCON)) {
4813 		spin_unlock(&tcon->tc_lock);
4814 		return 0;
4815 	}
4816 	tcon->status = TID_IN_TCON;
4817 	spin_unlock(&tcon->tc_lock);
4818 
4819 	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
4820 	if (rc) {
4821 		spin_lock(&tcon->tc_lock);
4822 		if (tcon->status == TID_IN_TCON)
4823 			tcon->status = TID_NEED_TCON;
4824 		spin_unlock(&tcon->tc_lock);
4825 	} else {
4826 		spin_lock(&tcon->tc_lock);
4827 		if (tcon->status == TID_IN_TCON)
4828 			tcon->status = TID_GOOD;
4829 		tcon->need_reconnect = false;
4830 		spin_unlock(&tcon->tc_lock);
4831 	}
4832 
4833 	return rc;
4834 }
4835 #endif
4836