/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_SMB2
#include "smb2pdu.h"
#endif

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

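/*
 * Decrement the count of active xids.  Callers normally pair these
 * through the get_xid()/free_xid() wrappers rather than calling
 * _get_xid()/_free_xid() directly, roughly (sketch only; do_cifs_op()
 * is a placeholder, not a real function):
 *
 *	unsigned int xid = get_xid();
 *	rc = do_cifs_op(xid, tcon, ...);
 *	free_xid(xid);
 */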
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

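/* Allocate and minimally initialize a new SMB session structure. */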
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kzfree(buf_to_free);
}

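/* Allocate and minimally initialize a new tree connection structure. */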
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		spin_lock_init(&ret_buf->open_file_lock);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	size_t buf_size = sizeof(struct smb_hdr);

#ifdef CONFIG_CIFS_SMB2
	/*
	 * The SMB2 header is bigger than the CIFS one - no problem
	 * clearing a few more bytes for CIFS.
	 */
	buf_size = sizeof(struct smb2_hdr);
#endif
	/*
	 * We could use the negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc the same size,
	 * albeit slightly larger than necessary, and maxbuffersize
	 * defaults to this and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	if (ret_buf) {
		memset(ret_buf, 0, buf_size + 3);
		atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}

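/* Return a large SMB request buffer to the cifs_req_poolp mempool. */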
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use the negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc the same size,
   albeit slightly larger than necessary, and maxbuffersize
   defaults to this and cannot be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	if (ret_buf) {
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
		atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	}
	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

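/*
 * Release a response buffer via the matching pool; any resp_buftype
 * other than small or large buffer is treated as nothing to free.
 */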
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in; in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

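/*
 * Sanity check a received SMB header: verify the 0xFF 'S' 'M' 'B'
 * signature and that the frame is a response (the only request a
 * server may legitimately initiate is SMB_COM_LOCKING_ANDX for an
 * oplock break).  Returns 0 if the header looks valid, 1 otherwise.
 */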
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

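/*
 * Validate a received SMB frame: the RFC1001 length must match the
 * number of bytes read from the socket and be consistent with the
 * length calculated from the wct/bcc fields (allowing for a few known
 * server quirks).  Returns 0 if the frame is usable, -EIO if not.
 */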
int
checkSMB(char *buf, unsigned int total_read)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

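/*
 * Examine an unsolicited frame from the server and return true if it
 * was an oplock break or dnotify response that has been handled (or
 * can safely be ignored) here; on an oplock break the matching open
 * file is located and its oplock break work is queued.
 */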
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging an error on an invalid handle on an oplock
		   break - a harmless race between a close request and an
		   oplock break response is expected from time to time when
		   writing out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				queue_work(cifsiod_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

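/*
 * Translate the oplock level granted by the server into the
 * read/write caching flags stored in the cifs inode.
 */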
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

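/*
 * Drop the writer count taken in cifs_get_writer(); once the last
 * writer is gone, wake anyone waiting for writers to drain (e.g. the
 * oplock break worker).
 */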
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

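/*
 * Return true if the current task may use backup intent on this mount,
 * i.e. it matches the backupuid or backupgid mount options.
 */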
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

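/* Remove a pending open from its tcon's list of pending opens. */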
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
#ifdef CONFIG_CIFS_SMB2
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
#endif
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}