// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

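/*
 * Invalidate the locally cached copy of the inode's data on behalf of the
 * netfs library.
 */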
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	netfs_read_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm...  This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* We do not want atime to be less than mtime; that broke some apps. */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime))
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

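/*
 * Free the resources pinned by a request: drop the reference to the open
 * file handle, if cifs_init_request() took one.
 */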
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

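/*
 * Clean up after a subrequest: deregister any RDMA memory registration,
 * release any unused credits and free the xid.
 */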
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can cause
		   unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

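/*
 * Return true if any open fid on the inode currently holds byte-range
 * locks, false otherwise.
 */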
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

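/*
 * Acquire a rw_semaphore for writing by polling with down_write_trylock()
 * and sleeping 10ms between attempts, rather than blocking in the rwsem
 * wait queue.
 */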
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

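/*
 * Build a cifsFileInfo for a newly opened handle: take references on the
 * dentry, tlink and superblock, hook the structure into the per-inode and
 * per-tcon open-file lists, and stash it in file->private_data.  Returns
 * NULL if any allocation fails.
 */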
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* If this is a readable file instance, put it first in the list. */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

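/*
 * Tear down a cifsFileInfo once its last reference is gone: discard any
 * outstanding lock records and drop the tlink, dentry and superblock
 * references taken at open time.
 */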
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

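/*
 * Work item that retries the server-side close of a handle.  The close is
 * retried up to MAX_RETRIES times while it keeps failing with -EBUSY or
 * -EAGAIN, after which the fileinfo is released.
 */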
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	whether the final put should be offloaded to a workqueue
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	} else {
		/* hard link on the deferred close file */
		rc = cifs_get_hardlink_path(tcon, inode, file);
		if (rc)
			cifs_close_deferred_file(CIFS_I(inode));
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

1106 
1107 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1108 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1109 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1110 
1111 /*
1112  * Try to reacquire byte range locks that were released when session
1113  * to server was lost.
1114  */
1115 static int
cifs_relock_file(struct cifsFileInfo * cfile)1116 cifs_relock_file(struct cifsFileInfo *cfile)
1117 {
1118 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1119 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1120 	int rc = 0;
1121 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1122 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1123 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1124 
1125 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1126 	if (cinode->can_cache_brlcks) {
1127 		/* can cache locks - no need to relock */
1128 		up_read(&cinode->lock_sem);
1129 		return rc;
1130 	}
1131 
1132 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1133 	if (cap_unix(tcon->ses) &&
1134 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1135 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1136 		rc = cifs_push_posix_locks(cfile);
1137 	else
1138 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1139 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1140 
1141 	up_read(&cinode->lock_sem);
1142 	return rc;
1143 }
1144 
1145 static int
cifs_reopen_file(struct cifsFileInfo * cfile,bool can_flush)1146 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1147 {
1148 	int rc = -EACCES;
1149 	unsigned int xid;
1150 	__u32 oplock;
1151 	struct cifs_sb_info *cifs_sb;
1152 	struct cifs_tcon *tcon;
1153 	struct TCP_Server_Info *server;
1154 	struct cifsInodeInfo *cinode;
1155 	struct inode *inode;
1156 	void *page;
1157 	const char *full_path;
1158 	int desired_access;
1159 	int disposition = FILE_OPEN;
1160 	int create_options = CREATE_NOT_DIR;
1161 	struct cifs_open_parms oparms;
1162 	int rdwr_for_fscache = 0;
1163 
1164 	xid = get_xid();
1165 	mutex_lock(&cfile->fh_mutex);
1166 	if (!cfile->invalidHandle) {
1167 		mutex_unlock(&cfile->fh_mutex);
1168 		free_xid(xid);
1169 		return 0;
1170 	}
1171 
1172 	inode = d_inode(cfile->dentry);
1173 	cifs_sb = CIFS_SB(inode->i_sb);
1174 	tcon = tlink_tcon(cfile->tlink);
1175 	server = tcon->ses->server;
1176 
	/*
	 * We cannot take the rename sem here, because various ops (including
	 * some that already hold it) can end up causing writepage to be
	 * called; if the server was down, that lands us here, and we can
	 * never tell whether the caller already holds the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

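/*
 * A close may be deferred only if a close timeout is configured, we hold a
 * lease granting at least read and handle caching, and no byte-range lock
 * has been taken on the file.
 */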
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work
				 * queues new work, so increase the ref count to
				 * avoid a use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

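/*
 * Allocate and initialise a byte-range lock record for the current task.
 * Returns NULL if the allocation fails.
 */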
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

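/*
 * Record a granted byte-range lock on the fid's lock list, serialised by
 * the inode's lock_sem.
 */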
1617 static void
cifs_lock_add(struct cifsFileInfo * cfile,struct cifsLockInfo * lock)1618 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1619 {
1620 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1621 	cifs_down_write(&cinode->lock_sem);
1622 	list_add_tail(&lock->llist, &cfile->llist->locks);
1623 	up_write(&cinode->lock_sem);
1624 }
1625 
1626 /*
1627  * Set the byte-range lock (mandatory style). Returns:
1628  * 1) 0, if we set the lock and don't need to request to the server;
1629  * 2) 1, if no locks prevent us but we need to request to the server;
1630  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1631  */
1632 static int
cifs_lock_add_if(struct cifsFileInfo * cfile,struct cifsLockInfo * lock,bool wait)1633 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1634 		 bool wait)
1635 {
1636 	struct cifsLockInfo *conf_lock;
1637 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1638 	bool exist;
1639 	int rc = 0;
1640 
1641 try_again:
1642 	exist = false;
1643 	cifs_down_write(&cinode->lock_sem);
1644 
1645 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1646 					lock->type, lock->flags, &conf_lock,
1647 					CIFS_LOCK_OP);
1648 	if (!exist && cinode->can_cache_brlcks) {
1649 		list_add_tail(&lock->llist, &cfile->llist->locks);
1650 		up_write(&cinode->lock_sem);
1651 		return rc;
1652 	}
1653 
1654 	if (!exist)
1655 		rc = 1;
1656 	else if (!wait)
1657 		rc = -EACCES;
1658 	else {
1659 		list_add_tail(&lock->blist, &conf_lock->blist);
1660 		up_write(&cinode->lock_sem);
1661 		rc = wait_event_interruptible(lock->block_q,
1662 					(lock->blist.prev == &lock->blist) &&
1663 					(lock->blist.next == &lock->blist));
1664 		if (!rc)
1665 			goto try_again;
1666 		cifs_down_write(&cinode->lock_sem);
1667 		list_del_init(&lock->blist);
1668 	}
1669 
1670 	up_write(&cinode->lock_sem);
1671 	return rc;
1672 }
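
/*
 * Note on the blocking path above (a sketch, not normative): the waiter
 * parks itself on the conflicting lock's blist and sleeps until the
 * conflicting lock's release unhooks it (see cifs_del_lock_waiters()),
 * at which point the entry is empty again and the scan is retried:
 *
 *	list_add_tail(&lock->blist, &conf_lock->blist);
 *	wait_event_interruptible(lock->block_q, list_empty(&lock->blist));
 *	goto try_again;
 *
 * The function open-codes the list_empty() test as the prev/next
 * comparison; the form above is shown only for clarity.
 */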
1673 
1674 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1675 /*
1676  * Check if there is another lock that prevents us from setting the lock
1677  * (posix style). If such a lock exists, update the flock structure with
1678  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1679  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1680  * request the server or 1 otherwise.
1681  */
1682 static int
1683 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1684 {
1685 	int rc = 0;
1686 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1687 	unsigned char saved_type = flock->c.flc_type;
1688 
1689 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1690 		return 1;
1691 
1692 	down_read(&cinode->lock_sem);
1693 	posix_test_lock(file, flock);
1694 
1695 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1696 		flock->c.flc_type = saved_type;
1697 		rc = 1;
1698 	}
1699 
1700 	up_read(&cinode->lock_sem);
1701 	return rc;
1702 }
1703 
1704 /*
1705  * Set the byte-range lock (posix style). Returns:
1706  * 1) <0, if an error occurs while setting the lock;
1707  * 2) 0, if we set the lock and don't need to request the server;
1708  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1709  * 4) FILE_LOCK_DEFERRED + 1, if we need to request the server.
1710  */
1711 static int
1712 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1713 {
1714 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1715 	int rc = FILE_LOCK_DEFERRED + 1;
1716 
1717 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1718 		return rc;
1719 
1720 	cifs_down_write(&cinode->lock_sem);
1721 	if (!cinode->can_cache_brlcks) {
1722 		up_write(&cinode->lock_sem);
1723 		return rc;
1724 	}
1725 
1726 	rc = posix_lock_file(file, flock, NULL);
1727 	up_write(&cinode->lock_sem);
1728 	return rc;
1729 }
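
/*
 * Sketch of how the four-way contract above is consumed (illustrative;
 * the in-tree caller is cifs_setlk() further down in this file):
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;	(error, success or deferred - all final)
 *	... only FILE_LOCK_DEFERRED + 1 falls through to the server ...
 */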
1730 
1731 int
1732 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1733 {
1734 	unsigned int xid;
1735 	int rc = 0, stored_rc;
1736 	struct cifsLockInfo *li, *tmp;
1737 	struct cifs_tcon *tcon;
1738 	unsigned int num, max_num, max_buf;
1739 	LOCKING_ANDX_RANGE *buf, *cur;
1740 	static const int types[] = {
1741 		LOCKING_ANDX_LARGE_FILES,
1742 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1743 	};
1744 	int i;
1745 
1746 	xid = get_xid();
1747 	tcon = tlink_tcon(cfile->tlink);
1748 
1749 	/*
1750 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1751 	 * and check it before using.
1752 	 */
1753 	max_buf = tcon->ses->server->maxBuf;
1754 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1755 		free_xid(xid);
1756 		return -EINVAL;
1757 	}
1758 
1759 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1760 		     PAGE_SIZE);
1761 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1762 			PAGE_SIZE);
1763 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1764 						sizeof(LOCKING_ANDX_RANGE);
1765 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1766 	if (!buf) {
1767 		free_xid(xid);
1768 		return -ENOMEM;
1769 	}
1770 
1771 	for (i = 0; i < 2; i++) {
1772 		cur = buf;
1773 		num = 0;
1774 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1775 			if (li->type != types[i])
1776 				continue;
1777 			cur->Pid = cpu_to_le16(li->pid);
1778 			cur->LengthLow = cpu_to_le32((u32)li->length);
1779 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1780 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1781 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1782 			if (++num == max_num) {
1783 				stored_rc = cifs_lockv(xid, tcon,
1784 						       cfile->fid.netfid,
1785 						       (__u8)li->type, 0, num,
1786 						       buf);
1787 				if (stored_rc)
1788 					rc = stored_rc;
1789 				cur = buf;
1790 				num = 0;
1791 			} else
1792 				cur++;
1793 		}
1794 
1795 		if (num) {
1796 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1797 					       (__u8)types[i], 0, num, buf);
1798 			if (stored_rc)
1799 				rc = stored_rc;
1800 		}
1801 	}
1802 
1803 	kfree(buf);
1804 	free_xid(xid);
1805 	return rc;
1806 }
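
/*
 * Worked example of the batching arithmetic above, with sizes assumed
 * purely for illustration (say sizeof(struct smb_hdr) == 32,
 * sizeof(LOCKING_ANDX_RANGE) == 20, PAGE_SIZE == 4096, maxBuf == 16644):
 *
 *	max_buf = min(16644 - 32, 4096)  = 4096
 *	max_num = (4096 - 32) / 20       = 203
 *
 * so up to 203 ranges are coalesced into one LOCKING_ANDX request.  Note
 * the header allowance is subtracted twice, erring on the small side of
 * the server's advertised buffer size.
 */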
1807 
1808 static __u32
1809 hash_lockowner(fl_owner_t owner)
1810 {
1811 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1812 }
1813 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1814 
1815 struct lock_to_push {
1816 	struct list_head llist;
1817 	__u64 offset;
1818 	__u64 length;
1819 	__u32 pid;
1820 	__u16 netfid;
1821 	__u8 type;
1822 };
1823 
1824 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1825 static int
1826 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1827 {
1828 	struct inode *inode = d_inode(cfile->dentry);
1829 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1830 	struct file_lock *flock;
1831 	struct file_lock_context *flctx = locks_inode_context(inode);
1832 	unsigned int count = 0, i;
1833 	int rc = 0, xid, type;
1834 	struct list_head locks_to_send, *el;
1835 	struct lock_to_push *lck, *tmp;
1836 	__u64 length;
1837 
1838 	xid = get_xid();
1839 
1840 	if (!flctx)
1841 		goto out;
1842 
1843 	spin_lock(&flctx->flc_lock);
1844 	list_for_each(el, &flctx->flc_posix) {
1845 		count++;
1846 	}
1847 	spin_unlock(&flctx->flc_lock);
1848 
1849 	INIT_LIST_HEAD(&locks_to_send);
1850 
1851 	/*
1852 	 * Allocating count locks is enough because no FL_POSIX locks can be
1853 	 * added to the list while we are holding cinode->lock_sem that
1854 	 * protects locking operations of this inode.
1855 	 */
1856 	for (i = 0; i < count; i++) {
1857 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1858 		if (!lck) {
1859 			rc = -ENOMEM;
1860 			goto err_out;
1861 		}
1862 		list_add_tail(&lck->llist, &locks_to_send);
1863 	}
1864 
1865 	el = locks_to_send.next;
1866 	spin_lock(&flctx->flc_lock);
1867 	for_each_file_lock(flock, &flctx->flc_posix) {
1868 		unsigned char ftype = flock->c.flc_type;
1869 
1870 		if (el == &locks_to_send) {
1871 			/*
1872 			 * The list ended. We don't have enough allocated
1873 			 * structures - something is really wrong.
1874 			 */
1875 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1876 			break;
1877 		}
1878 		length = cifs_flock_len(flock);
1879 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1880 			type = CIFS_RDLCK;
1881 		else
1882 			type = CIFS_WRLCK;
1883 		lck = list_entry(el, struct lock_to_push, llist);
1884 		lck->pid = hash_lockowner(flock->c.flc_owner);
1885 		lck->netfid = cfile->fid.netfid;
1886 		lck->length = length;
1887 		lck->type = type;
1888 		lck->offset = flock->fl_start;
1889 	}
1890 	spin_unlock(&flctx->flc_lock);
1891 
1892 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1893 		int stored_rc;
1894 
1895 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1896 					     lck->offset, lck->length, NULL,
1897 					     lck->type, 0);
1898 		if (stored_rc)
1899 			rc = stored_rc;
1900 		list_del(&lck->llist);
1901 		kfree(lck);
1902 	}
1903 
1904 out:
1905 	free_xid(xid);
1906 	return rc;
1907 err_out:
1908 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1909 		list_del(&lck->llist);
1910 		kfree(lck);
1911 	}
1912 	goto out;
1913 }
1914 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1915 
1916 static int
1917 cifs_push_locks(struct cifsFileInfo *cfile)
1918 {
1919 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1920 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1921 	int rc = 0;
1922 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1923 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1924 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1925 
1926 	/* we are going to update can_cache_brlcks here - need a write access */
1927 	cifs_down_write(&cinode->lock_sem);
1928 	if (!cinode->can_cache_brlcks) {
1929 		up_write(&cinode->lock_sem);
1930 		return rc;
1931 	}
1932 
1933 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1934 	if (cap_unix(tcon->ses) &&
1935 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1936 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1937 		rc = cifs_push_posix_locks(cfile);
1938 	else
1939 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1940 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1941 
1942 	cinode->can_cache_brlcks = false;
1943 	up_write(&cinode->lock_sem);
1944 	return rc;
1945 }
1946 
1947 static void
1948 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1949 		bool *wait_flag, struct TCP_Server_Info *server)
1950 {
1951 	if (flock->c.flc_flags & FL_POSIX)
1952 		cifs_dbg(FYI, "Posix\n");
1953 	if (flock->c.flc_flags & FL_FLOCK)
1954 		cifs_dbg(FYI, "Flock\n");
1955 	if (flock->c.flc_flags & FL_SLEEP) {
1956 		cifs_dbg(FYI, "Blocking lock\n");
1957 		*wait_flag = true;
1958 	}
1959 	if (flock->c.flc_flags & FL_ACCESS)
1960 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1961 	if (flock->c.flc_flags & FL_LEASE)
1962 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1963 	if (flock->c.flc_flags &
1964 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1965 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1966 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1967 		         flock->c.flc_flags);
1968 
1969 	*type = server->vals->large_lock_type;
1970 	if (lock_is_write(flock)) {
1971 		cifs_dbg(FYI, "F_WRLCK\n");
1972 		*type |= server->vals->exclusive_lock_type;
1973 		*lock = 1;
1974 	} else if (lock_is_unlock(flock)) {
1975 		cifs_dbg(FYI, "F_UNLCK\n");
1976 		*type |= server->vals->unlock_lock_type;
1977 		*unlock = 1;
1978 		/* Check if unlock includes more than one lock range */
1979 	} else if (lock_is_read(flock)) {
1980 		cifs_dbg(FYI, "F_RDLCK\n");
1981 		*type |= server->vals->shared_lock_type;
1982 		*lock = 1;
1983 	} else if (flock->c.flc_type == F_EXLCK) {
1984 		cifs_dbg(FYI, "F_EXLCK\n");
1985 		*type |= server->vals->exclusive_lock_type;
1986 		*lock = 1;
1987 	} else if (flock->c.flc_type == F_SHLCK) {
1988 		cifs_dbg(FYI, "F_SHLCK\n");
1989 		*type |= server->vals->shared_lock_type;
1990 		*lock = 1;
1991 	} else
1992 		cifs_dbg(FYI, "Unknown type of lock\n");
1993 }
1994 
1995 static int
1996 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1997 	   bool wait_flag, bool posix_lck, unsigned int xid)
1998 {
1999 	int rc = 0;
2000 	__u64 length = cifs_flock_len(flock);
2001 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2002 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2003 	struct TCP_Server_Info *server = tcon->ses->server;
2004 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2005 	__u16 netfid = cfile->fid.netfid;
2006 
2007 	if (posix_lck) {
2008 		int posix_lock_type;
2009 
2010 		rc = cifs_posix_lock_test(file, flock);
2011 		if (!rc)
2012 			return rc;
2013 
2014 		if (type & server->vals->shared_lock_type)
2015 			posix_lock_type = CIFS_RDLCK;
2016 		else
2017 			posix_lock_type = CIFS_WRLCK;
2018 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2019 				      hash_lockowner(flock->c.flc_owner),
2020 				      flock->fl_start, length, flock,
2021 				      posix_lock_type, wait_flag);
2022 		return rc;
2023 	}
2024 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2025 
2026 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2027 	if (!rc)
2028 		return rc;
2029 
2030 	/* BB we could chain these into one lock request BB */
2031 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2032 				    1, 0, false);
2033 	if (rc == 0) {
2034 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2035 					    type, 0, 1, false);
2036 		flock->c.flc_type = F_UNLCK;
2037 		if (rc != 0)
2038 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2039 				 rc);
2040 		return 0;
2041 	}
2042 
2043 	if (type & server->vals->shared_lock_type) {
2044 		flock->c.flc_type = F_WRLCK;
2045 		return 0;
2046 	}
2047 
2048 	type &= ~server->vals->exclusive_lock_type;
2049 
2050 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2051 				    type | server->vals->shared_lock_type,
2052 				    1, 0, false);
2053 	if (rc == 0) {
2054 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2055 			type | server->vals->shared_lock_type, 0, 1, false);
2056 		flock->c.flc_type = F_RDLCK;
2057 		if (rc != 0)
2058 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2059 				 rc);
2060 	} else
2061 		flock->c.flc_type = F_WRLCK;
2062 
2063 	return 0;
2064 }
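
/*
 * There is no true "test lock" verb on the wire for the mandatory path
 * above, so the code probes by acquiring and immediately releasing the
 * lock.  The pattern, reduced to a sketch:
 *
 *	rc = mand_lock(..., lock=1, unlock=0);		try to acquire
 *	if (rc == 0) {
 *		mand_lock(..., lock=0, unlock=1);	release the probe
 *		flock->c.flc_type = F_UNLCK;		no conflict found
 *	}
 *
 * When an exclusive probe fails, a second, shared probe distinguishes a
 * conflicting read lock (report F_RDLCK) from a write lock (F_WRLCK).
 */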
2065 
2066 void
2067 cifs_move_llist(struct list_head *source, struct list_head *dest)
2068 {
2069 	struct list_head *li, *tmp;
2070 	list_for_each_safe(li, tmp, source)
2071 		list_move(li, dest);
2072 }
2073 
2074 int
2075 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2076 				struct file *file)
2077 {
2078 	struct cifsFileInfo *open_file = NULL;
2079 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2080 	int rc = 0;
2081 
2082 	spin_lock(&tcon->open_file_lock);
2083 	spin_lock(&cinode->open_file_lock);
2084 
2085 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2086 		if (file->f_flags == open_file->f_flags) {
2087 			rc = -EINVAL;
2088 			break;
2089 		}
2090 	}
2091 
2092 	spin_unlock(&cinode->open_file_lock);
2093 	spin_unlock(&tcon->open_file_lock);
2094 	return rc;
2095 }
2096 
2097 void
2098 cifs_free_llist(struct list_head *llist)
2099 {
2100 	struct cifsLockInfo *li, *tmp;
2101 	list_for_each_entry_safe(li, tmp, llist, llist) {
2102 		cifs_del_lock_waiters(li);
2103 		list_del(&li->llist);
2104 		kfree(li);
2105 	}
2106 }
2107 
2108 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2109 int
2110 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2111 		  unsigned int xid)
2112 {
2113 	int rc = 0, stored_rc;
2114 	static const int types[] = {
2115 		LOCKING_ANDX_LARGE_FILES,
2116 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2117 	};
2118 	unsigned int i;
2119 	unsigned int max_num, num, max_buf;
2120 	LOCKING_ANDX_RANGE *buf, *cur;
2121 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2122 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2123 	struct cifsLockInfo *li, *tmp;
2124 	__u64 length = cifs_flock_len(flock);
2125 	LIST_HEAD(tmp_llist);
2126 
2127 	/*
2128 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2129 	 * and check it before using.
2130 	 */
2131 	max_buf = tcon->ses->server->maxBuf;
2132 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2133 		return -EINVAL;
2134 
2135 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2136 		     PAGE_SIZE);
2137 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2138 			PAGE_SIZE);
2139 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2140 						sizeof(LOCKING_ANDX_RANGE);
2141 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2142 	if (!buf)
2143 		return -ENOMEM;
2144 
2145 	cifs_down_write(&cinode->lock_sem);
2146 	for (i = 0; i < 2; i++) {
2147 		cur = buf;
2148 		num = 0;
2149 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2150 			if (flock->fl_start > li->offset ||
2151 			    (flock->fl_start + length) <
2152 			    (li->offset + li->length))
2153 				continue;
2154 			if (current->tgid != li->pid)
2155 				continue;
2156 			if (types[i] != li->type)
2157 				continue;
2158 			if (cinode->can_cache_brlcks) {
2159 				/*
2160 				 * We can cache brlock requests - simply remove
2161 				 * a lock from the file's list.
2162 				 */
2163 				list_del(&li->llist);
2164 				cifs_del_lock_waiters(li);
2165 				kfree(li);
2166 				continue;
2167 			}
2168 			cur->Pid = cpu_to_le16(li->pid);
2169 			cur->LengthLow = cpu_to_le32((u32)li->length);
2170 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2171 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2172 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2173 			/*
2174 			 * We need to save a lock here to let us add it again to
2175 			 * the file's list if the unlock range request fails on
2176 			 * the server.
2177 			 */
2178 			list_move(&li->llist, &tmp_llist);
2179 			if (++num == max_num) {
2180 				stored_rc = cifs_lockv(xid, tcon,
2181 						       cfile->fid.netfid,
2182 						       li->type, num, 0, buf);
2183 				if (stored_rc) {
2184 					/*
2185 					 * We failed on the unlock range
2186 					 * request - add all locks from the tmp
2187 					 * list to the head of the file's list.
2188 					 */
2189 					cifs_move_llist(&tmp_llist,
2190 							&cfile->llist->locks);
2191 					rc = stored_rc;
2192 				} else
2193 					/*
2194 				 * The unlock range request succeeded -
2195 					 * free the tmp list.
2196 					 */
2197 					cifs_free_llist(&tmp_llist);
2198 				cur = buf;
2199 				num = 0;
2200 			} else
2201 				cur++;
2202 		}
2203 		if (num) {
2204 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2205 					       types[i], num, 0, buf);
2206 			if (stored_rc) {
2207 				cifs_move_llist(&tmp_llist,
2208 						&cfile->llist->locks);
2209 				rc = stored_rc;
2210 			} else
2211 				cifs_free_llist(&tmp_llist);
2212 		}
2213 	}
2214 
2215 	up_write(&cinode->lock_sem);
2216 	kfree(buf);
2217 	return rc;
2218 }
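
/*
 * The tmp_llist handling above is a simple rollback scheme: entries are
 * staged off the file's list before the unlock request goes out, then
 * either freed on success or spliced back on failure.  In sketch form:
 *
 *	list_move(&li->llist, &tmp_llist);	stage for unlock
 *	stored_rc = cifs_lockv(...);		ask the server
 *	if (stored_rc)
 *		cifs_move_llist(&tmp_llist, &cfile->llist->locks);
 *	else
 *		cifs_free_llist(&tmp_llist);
 *
 * so a failed wire request never loses track of locks the client holds.
 */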
2219 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2220 
2221 static int
2222 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2223 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2224 	   unsigned int xid)
2225 {
2226 	int rc = 0;
2227 	__u64 length = cifs_flock_len(flock);
2228 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2229 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2230 	struct TCP_Server_Info *server = tcon->ses->server;
2231 	struct inode *inode = d_inode(cfile->dentry);
2232 
2233 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2234 	if (posix_lck) {
2235 		int posix_lock_type;
2236 
2237 		rc = cifs_posix_lock_set(file, flock);
2238 		if (rc <= FILE_LOCK_DEFERRED)
2239 			return rc;
2240 
2241 		if (type & server->vals->shared_lock_type)
2242 			posix_lock_type = CIFS_RDLCK;
2243 		else
2244 			posix_lock_type = CIFS_WRLCK;
2245 
2246 		if (unlock == 1)
2247 			posix_lock_type = CIFS_UNLCK;
2248 
2249 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2250 				      hash_lockowner(flock->c.flc_owner),
2251 				      flock->fl_start, length,
2252 				      NULL, posix_lock_type, wait_flag);
2253 		goto out;
2254 	}
2255 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2256 	if (lock) {
2257 		struct cifsLockInfo *lock;
2258 
2259 		lock = cifs_lock_init(flock->fl_start, length, type,
2260 				      flock->c.flc_flags);
2261 		if (!lock)
2262 			return -ENOMEM;
2263 
2264 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2265 		if (rc < 0) {
2266 			kfree(lock);
2267 			return rc;
2268 		}
2269 		if (!rc)
2270 			goto out;
2271 
2272 		/*
2273 		 * Windows 7 server can delay breaking lease from read to None
2274 		 * if we set a byte-range lock on a file - break it explicitly
2275 		 * before sending the lock to the server to be sure the next
2276 		 * read won't conflict with non-overlapping locks due to
2277 		 * page reading.
2278 		 */
2279 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2280 					CIFS_CACHE_READ(CIFS_I(inode))) {
2281 			cifs_zap_mapping(inode);
2282 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2283 				 inode);
2284 			CIFS_I(inode)->oplock = 0;
2285 		}
2286 
2287 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2288 					    type, 1, 0, wait_flag);
2289 		if (rc) {
2290 			kfree(lock);
2291 			return rc;
2292 		}
2293 
2294 		cifs_lock_add(cfile, lock);
2295 	} else if (unlock)
2296 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2297 
2298 out:
2299 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2300 		/*
2301 		 * If this is a request to remove all locks because we
2302 		 * are closing the file, it doesn't matter if the
2303 		 * unlocking failed as both cifs.ko and the SMB server
2304 		 * remove the lock on file close
2305 		 */
2306 		if (rc) {
2307 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2308 			if (!(flock->c.flc_flags & FL_CLOSE))
2309 				return rc;
2310 		}
2311 		rc = locks_lock_file_wait(file, flock);
2312 	}
2313 	return rc;
2314 }
2315 
2316 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2317 {
2318 	int rc, xid;
2319 	int lock = 0, unlock = 0;
2320 	bool wait_flag = false;
2321 	bool posix_lck = false;
2322 	struct cifs_sb_info *cifs_sb;
2323 	struct cifs_tcon *tcon;
2324 	struct cifsFileInfo *cfile;
2325 	__u32 type;
2326 
2327 	xid = get_xid();
2328 
2329 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2330 		rc = -ENOLCK;
2331 		free_xid(xid);
2332 		return rc;
2333 	}
2334 
2335 	cfile = (struct cifsFileInfo *)file->private_data;
2336 	tcon = tlink_tcon(cfile->tlink);
2337 
2338 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2339 			tcon->ses->server);
2340 	cifs_sb = CIFS_FILE_SB(file);
2341 
2342 	if (cap_unix(tcon->ses) &&
2343 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2344 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2345 		posix_lck = true;
2346 
2347 	if (!lock && !unlock) {
2348 		/*
2349 		 * if this is neither a lock nor an unlock request, there is
2350 		 * nothing to do since we do not know what it is
2351 		 */
2352 		rc = -EOPNOTSUPP;
2353 		free_xid(xid);
2354 		return rc;
2355 	}
2356 
2357 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2358 			xid);
2359 	free_xid(xid);
2360 	return rc;
2363 }
2364 
2365 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2366 {
2367 	int rc, xid;
2368 	int lock = 0, unlock = 0;
2369 	bool wait_flag = false;
2370 	bool posix_lck = false;
2371 	struct cifs_sb_info *cifs_sb;
2372 	struct cifs_tcon *tcon;
2373 	struct cifsFileInfo *cfile;
2374 	__u32 type;
2375 
2376 	rc = -EACCES;
2377 	xid = get_xid();
2378 
2379 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2380 		 flock->c.flc_flags, flock->c.flc_type,
2381 		 (long long)flock->fl_start,
2382 		 (long long)flock->fl_end);
2383 
2384 	cfile = (struct cifsFileInfo *)file->private_data;
2385 	tcon = tlink_tcon(cfile->tlink);
2386 
2387 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2388 			tcon->ses->server);
2389 	cifs_sb = CIFS_FILE_SB(file);
2390 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2391 
2392 	if (cap_unix(tcon->ses) &&
2393 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2394 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2395 		posix_lck = true;
2396 	/*
2397 	 * BB add code here to normalize offset and length to account for
2398 	 * negative length which we cannot accept over the wire.
2399 	 */
2400 	if (IS_GETLK(cmd)) {
2401 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2402 		free_xid(xid);
2403 		return rc;
2404 	}
2405 
2406 	if (!lock && !unlock) {
2407 		/*
2408 		 * if this is neither a lock nor an unlock request, there is
2409 		 * nothing to do since we do not know what it is
2410 		 */
2411 		free_xid(xid);
2412 		return -EOPNOTSUPP;
2413 	}
2414 
2415 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2416 			xid);
2417 	free_xid(xid);
2418 	return rc;
2419 }
2420 
2421 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2422 				      bool was_async)
2423 {
2424 	struct netfs_io_request *wreq = wdata->rreq;
2425 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2426 	loff_t wrend;
2427 
2428 	if (result > 0) {
2429 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2430 
2431 		if (wrend > ictx->zero_point &&
2432 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2433 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2434 			ictx->zero_point = wrend;
2435 		if (wrend > ictx->remote_i_size)
2436 			netfs_resize_file(ictx, wrend, true);
2437 	}
2438 
2439 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2440 }
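
/*
 * Numeric illustration of the bookkeeping above (values invented): a
 * subrequest that started at offset 4096 with 512 bytes already
 * transferred and a result of 1024 ends at wrend = 4096 + 512 + 1024 =
 * 5632.  For an unbuffered or direct write with zero_point at 5120,
 * zero_point advances to 5632; if the server-side size was also 5120,
 * netfs_resize_file() records the new remote EOF of 5632.
 */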
2441 
2442 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2443 					bool fsuid_only)
2444 {
2445 	struct cifsFileInfo *open_file = NULL;
2446 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2447 
2448 	/* only filter by fsuid on multiuser mounts */
2449 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2450 		fsuid_only = false;
2451 
2452 	spin_lock(&cifs_inode->open_file_lock);
2453 	/* we could simply get the first_list_entry since write-only entries
2454 	   are always at the end of the list but since the first entry might
2455 	   have a close pending, we go through the whole list */
2456 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2457 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2458 			continue;
2459 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2460 			if (!open_file->invalidHandle) {
2461 				/* found a good file */
2462 				/* lock it so it will not be closed on us */
2463 				cifsFileInfo_get(open_file);
2464 				spin_unlock(&cifs_inode->open_file_lock);
2465 				return open_file;
2466 			} /* else might as well continue, and look for
2467 			     another, or simply have the caller reopen it
2468 			     again rather than trying to fix this handle */
2469 		} else /* write only file */
2470 			break; /* write only files are last so must be done */
2471 	}
2472 	spin_unlock(&cifs_inode->open_file_lock);
2473 	return NULL;
2474 }
2475 
2476 /* Return -EBADF if no handle is found, or a general rc otherwise */
2477 int
2478 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2479 		       struct cifsFileInfo **ret_file)
2480 {
2481 	struct cifsFileInfo *open_file, *inv_file = NULL;
2482 	struct cifs_sb_info *cifs_sb;
2483 	bool any_available = false;
2484 	int rc = -EBADF;
2485 	unsigned int refind = 0;
2486 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2487 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2488 	*ret_file = NULL;
2489 
2490 	/*
2491 	 * Having a null inode here (because mapping->host was set to zero by
2492 	 * the VFS or MM) should not happen but we had reports of an oops (due
2493 	 * to it being zero) during stress testcases so we need to check for it
2494 	 */
2495 
2496 	if (cifs_inode == NULL) {
2497 		cifs_dbg(VFS, "Null inode passed to %s\n", __func__);
2498 		dump_stack();
2499 		return rc;
2500 	}
2501 
2502 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2503 
2504 	/* only filter by fsuid on multiuser mounts */
2505 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2506 		fsuid_only = false;
2507 
2508 	spin_lock(&cifs_inode->open_file_lock);
2509 refind_writable:
2510 	if (refind > MAX_REOPEN_ATT) {
2511 		spin_unlock(&cifs_inode->open_file_lock);
2512 		return rc;
2513 	}
2514 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2515 		if (!any_available && open_file->pid != current->tgid)
2516 			continue;
2517 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2518 			continue;
2519 		if (with_delete && !(open_file->fid.access & DELETE))
2520 			continue;
2521 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2522 			if (!open_file->invalidHandle) {
2523 				/* found a good writable file */
2524 				cifsFileInfo_get(open_file);
2525 				spin_unlock(&cifs_inode->open_file_lock);
2526 				*ret_file = open_file;
2527 				return 0;
2528 			} else {
2529 				if (!inv_file)
2530 					inv_file = open_file;
2531 			}
2532 		}
2533 	}
2534 	/* couldn't find usable FH with same pid, try any available */
2535 	if (!any_available) {
2536 		any_available = true;
2537 		goto refind_writable;
2538 	}
2539 
2540 	if (inv_file) {
2541 		any_available = false;
2542 		cifsFileInfo_get(inv_file);
2543 	}
2544 
2545 	spin_unlock(&cifs_inode->open_file_lock);
2546 
2547 	if (inv_file) {
2548 		rc = cifs_reopen_file(inv_file, false);
2549 		if (!rc) {
2550 			*ret_file = inv_file;
2551 			return 0;
2552 		}
2553 
2554 		spin_lock(&cifs_inode->open_file_lock);
2555 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2556 		spin_unlock(&cifs_inode->open_file_lock);
2557 		cifsFileInfo_put(inv_file);
2558 		++refind;
2559 		inv_file = NULL;
2560 		spin_lock(&cifs_inode->open_file_lock);
2561 		goto refind_writable;
2562 	}
2563 
2564 	return rc;
2565 }
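
/*
 * Shape of the retry loop above, reduced to a sketch: up to MAX_REOPEN_ATT
 * attempts are made to resurrect an invalidated handle when no valid
 * writable one exists.
 *
 *	scan open files for a valid writable handle, return it if found
 *	otherwise, if an invalidated writable handle was seen:
 *		drop the spinlock and cifs_reopen_file(inv_file)
 *		on success return it; on failure move it to the list
 *		tail, bump refind and rescan
 *
 * Moving the failed handle to the tail presumably keeps the rescan from
 * picking the same dead handle first every time.
 */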
2566 
2567 struct cifsFileInfo *
2568 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2569 {
2570 	struct cifsFileInfo *cfile;
2571 	int rc;
2572 
2573 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2574 	if (rc)
2575 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2576 
2577 	return cfile;
2578 }
2579 
2580 int
2581 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2582 		       int flags,
2583 		       struct cifsFileInfo **ret_file)
2584 {
2585 	struct cifsFileInfo *cfile;
2586 	void *page = alloc_dentry_path();
2587 
2588 	*ret_file = NULL;
2589 
2590 	spin_lock(&tcon->open_file_lock);
2591 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2592 		struct cifsInodeInfo *cinode;
2593 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2594 		if (IS_ERR(full_path)) {
2595 			spin_unlock(&tcon->open_file_lock);
2596 			free_dentry_path(page);
2597 			return PTR_ERR(full_path);
2598 		}
2599 		if (strcmp(full_path, name))
2600 			continue;
2601 
2602 		cinode = CIFS_I(d_inode(cfile->dentry));
2603 		spin_unlock(&tcon->open_file_lock);
2604 		free_dentry_path(page);
2605 		return cifs_get_writable_file(cinode, flags, ret_file);
2606 	}
2607 
2608 	spin_unlock(&tcon->open_file_lock);
2609 	free_dentry_path(page);
2610 	return -ENOENT;
2611 }
2612 
2613 int
2614 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2615 		       struct cifsFileInfo **ret_file)
2616 {
2617 	struct cifsFileInfo *cfile;
2618 	void *page = alloc_dentry_path();
2619 
2620 	*ret_file = NULL;
2621 
2622 	spin_lock(&tcon->open_file_lock);
2623 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2624 		struct cifsInodeInfo *cinode;
2625 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2626 		if (IS_ERR(full_path)) {
2627 			spin_unlock(&tcon->open_file_lock);
2628 			free_dentry_path(page);
2629 			return PTR_ERR(full_path);
2630 		}
2631 		if (strcmp(full_path, name))
2632 			continue;
2633 
2634 		cinode = CIFS_I(d_inode(cfile->dentry));
2635 		spin_unlock(&tcon->open_file_lock);
2636 		free_dentry_path(page);
2637 		*ret_file = find_readable_file(cinode, 0);
2638 		return *ret_file ? 0 : -ENOENT;
2639 	}
2640 
2641 	spin_unlock(&tcon->open_file_lock);
2642 	free_dentry_path(page);
2643 	return -ENOENT;
2644 }
2645 
2646 /*
2647  * Flush data on a strict file.
2648  */
2649 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2650 		      int datasync)
2651 {
2652 	unsigned int xid;
2653 	int rc = 0;
2654 	struct cifs_tcon *tcon;
2655 	struct TCP_Server_Info *server;
2656 	struct cifsFileInfo *smbfile = file->private_data;
2657 	struct inode *inode = file_inode(file);
2658 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2659 
2660 	rc = file_write_and_wait_range(file, start, end);
2661 	if (rc) {
2662 		trace_cifs_fsync_err(inode->i_ino, rc);
2663 		return rc;
2664 	}
2665 
2666 	xid = get_xid();
2667 
2668 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2669 		 file, datasync);
2670 
2671 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2672 		rc = cifs_zap_mapping(inode);
2673 		if (rc) {
2674 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2675 			rc = 0; /* don't care about it in fsync */
2676 		}
2677 	}
2678 
2679 	tcon = tlink_tcon(smbfile->tlink);
2680 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2681 		server = tcon->ses->server;
2682 		if (server->ops->flush == NULL) {
2683 			rc = -ENOSYS;
2684 			goto strict_fsync_exit;
2685 		}
2686 
2687 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2688 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2689 			if (smbfile) {
2690 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2691 				cifsFileInfo_put(smbfile);
2692 			} else
2693 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2694 		} else
2695 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2696 	}
2697 
2698 strict_fsync_exit:
2699 	free_xid(xid);
2700 	return rc;
2701 }
2702 
2703 /*
2704  * Flush data on a non-strict file.
2705  */
2706 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2707 {
2708 	unsigned int xid;
2709 	int rc = 0;
2710 	struct cifs_tcon *tcon;
2711 	struct TCP_Server_Info *server;
2712 	struct cifsFileInfo *smbfile = file->private_data;
2713 	struct inode *inode = file_inode(file);
2714 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2715 
2716 	rc = file_write_and_wait_range(file, start, end);
2717 	if (rc) {
2718 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2719 		return rc;
2720 	}
2721 
2722 	xid = get_xid();
2723 
2724 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2725 		 file, datasync);
2726 
2727 	tcon = tlink_tcon(smbfile->tlink);
2728 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2729 		server = tcon->ses->server;
2730 		if (server->ops->flush == NULL) {
2731 			rc = -ENOSYS;
2732 			goto fsync_exit;
2733 		}
2734 
2735 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2736 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2737 			if (smbfile) {
2738 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2739 				cifsFileInfo_put(smbfile);
2740 			} else
2741 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2742 		} else
2743 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2744 	}
2745 
2746 fsync_exit:
2747 	free_xid(xid);
2748 	return rc;
2749 }
2750 
2751 /*
2752  * As file closes, flush all cached write data for this inode checking
2753  * for write behind errors.
2754  */
2755 int cifs_flush(struct file *file, fl_owner_t id)
2756 {
2757 	struct inode *inode = file_inode(file);
2758 	int rc = 0;
2759 
2760 	if (file->f_mode & FMODE_WRITE)
2761 		rc = filemap_write_and_wait(inode->i_mapping);
2762 
2763 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2764 	if (rc) {
2765 		/* get more nuanced writeback errors */
2766 		rc = filemap_check_wb_err(file->f_mapping, 0);
2767 		trace_cifs_flush_err(inode->i_ino, rc);
2768 	}
2769 	return rc;
2770 }
2771 
2772 static ssize_t
2773 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2774 {
2775 	struct file *file = iocb->ki_filp;
2776 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2777 	struct inode *inode = file->f_mapping->host;
2778 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2779 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2780 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2781 	ssize_t rc;
2782 
2783 	rc = netfs_start_io_write(inode);
2784 	if (rc < 0)
2785 		return rc;
2786 
2787 	/*
2788 	 * We need to hold the sem to be sure nobody modifies the lock list
2789 	 * with a brlock that prevents writing.
2790 	 */
2791 	down_read(&cinode->lock_sem);
2792 
2793 	rc = generic_write_checks(iocb, from);
2794 	if (rc <= 0)
2795 		goto out;
2796 
2797 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2798 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2799 				     server->vals->exclusive_lock_type, 0,
2800 				     NULL, CIFS_WRITE_OP))) {
2801 		rc = -EACCES;
2802 		goto out;
2803 	}
2804 
2805 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2806 
2807 out:
2808 	up_read(&cinode->lock_sem);
2809 	netfs_end_io_write(inode);
2810 	if (rc > 0)
2811 		rc = generic_write_sync(iocb, rc);
2812 	return rc;
2813 }
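
/*
 * Lock ordering in the helper above, spelled out descriptively:
 *
 *	netfs_start_io_write(inode)		exclude racing I/O
 *	  down_read(&cinode->lock_sem)		freeze the brlock list
 *	    generic_write_checks()
 *	    cifs_find_lock_conflict()		-EACCES on conflict
 *	    netfs_buffered_write_iter_locked()
 *	  up_read(&cinode->lock_sem)
 *	netfs_end_io_write(inode)
 *
 * lock_sem is taken for read because the writer only needs the list to be
 * stable; paths that mutate the lock list take it for write.
 */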
2814 
2815 ssize_t
2816 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2817 {
2818 	struct inode *inode = file_inode(iocb->ki_filp);
2819 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2820 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2821 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2822 						iocb->ki_filp->private_data;
2823 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2824 	ssize_t written;
2825 
2826 	written = cifs_get_writer(cinode);
2827 	if (written)
2828 		return written;
2829 
2830 	if (CIFS_CACHE_WRITE(cinode)) {
2831 		if (cap_unix(tcon->ses) &&
2832 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2833 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2834 			written = netfs_file_write_iter(iocb, from);
2835 			goto out;
2836 		}
2837 		written = cifs_writev(iocb, from);
2838 		goto out;
2839 	}
2840 	/*
2841 	 * For non-oplocked files in strict cache mode we need to write the data
2842 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2843 	 * affected pages because it may cause an error with mandatory locks on
2844 	 * these pages but not on the region from pos to pos+len-1.
2845 	 */
2846 	written = netfs_file_write_iter(iocb, from);
2847 	if (CIFS_CACHE_READ(cinode)) {
2848 		/*
2849 		 * We have read level caching and we have just sent a write
2850 		 * request to the server thus making data in the cache stale.
2851 		 * Zap the cache and set oplock/lease level to NONE to avoid
2852 		 * reading stale data from the cache. All subsequent read
2853 		 * operations will read new data from the server.
2854 		 */
2855 		cifs_zap_mapping(inode);
2856 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2857 			 inode);
2858 		cinode->oplock = 0;
2859 	}
2860 out:
2861 	cifs_put_writer(cinode);
2862 	return written;
2863 }
2864 
2865 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2866 {
2867 	ssize_t rc;
2868 	struct inode *inode = file_inode(iocb->ki_filp);
2869 
2870 	if (iocb->ki_flags & IOCB_DIRECT)
2871 		return netfs_unbuffered_read_iter(iocb, iter);
2872 
2873 	rc = cifs_revalidate_mapping(inode);
2874 	if (rc)
2875 		return rc;
2876 
2877 	return netfs_file_read_iter(iocb, iter);
2878 }
2879 
2880 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2881 {
2882 	struct inode *inode = file_inode(iocb->ki_filp);
2883 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2884 	ssize_t written;
2885 	int rc;
2886 
2887 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2888 		written = netfs_unbuffered_write_iter(iocb, from);
2889 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2890 			cifs_zap_mapping(inode);
2891 			cifs_dbg(FYI,
2892 				 "Set no oplock for inode=%p after a write operation\n",
2893 				 inode);
2894 			cinode->oplock = 0;
2895 		}
2896 		return written;
2897 	}
2898 
2899 	written = cifs_get_writer(cinode);
2900 	if (written)
2901 		return written;
2902 
2903 	written = netfs_file_write_iter(iocb, from);
2904 
2905 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2906 		rc = filemap_fdatawrite(inode->i_mapping);
2907 		if (rc)
2908 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2909 				 rc, inode);
2910 	}
2911 
2912 	cifs_put_writer(cinode);
2913 	return written;
2914 }
2915 
2916 ssize_t
2917 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2918 {
2919 	struct inode *inode = file_inode(iocb->ki_filp);
2920 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2921 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2922 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2923 						iocb->ki_filp->private_data;
2924 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2925 	int rc = -EACCES;
2926 
2927 	/*
2928 	 * In strict cache mode we need to read from the server all the time
2929 	 * if we don't have level II oplock because the server can delay mtime
2930 	 * change - so we can't make a decision about inode invalidating.
2931 	 * And we can also fail with page reading if there are mandatory locks
2932 	 * on pages affected by this read but not on the region from pos to
2933 	 * pos+len-1.
2934 	 */
2935 	if (!CIFS_CACHE_READ(cinode))
2936 		return netfs_unbuffered_read_iter(iocb, to);
2937 
2938 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2939 		if (iocb->ki_flags & IOCB_DIRECT)
2940 			return netfs_unbuffered_read_iter(iocb, to);
2941 		return netfs_buffered_read_iter(iocb, to);
2942 	}
2943 
2944 	/*
2945 	 * We need to hold the sem to be sure nobody modifies the lock list
2946 	 * with a brlock that prevents reading.
2947 	 */
2948 	if (iocb->ki_flags & IOCB_DIRECT) {
2949 		rc = netfs_start_io_direct(inode);
2950 		if (rc < 0)
2951 			goto out;
2952 		rc = -EACCES;
2953 		down_read(&cinode->lock_sem);
2954 		if (!cifs_find_lock_conflict(
2955 			    cfile, iocb->ki_pos, iov_iter_count(to),
2956 			    tcon->ses->server->vals->shared_lock_type,
2957 			    0, NULL, CIFS_READ_OP))
2958 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2959 		up_read(&cinode->lock_sem);
2960 		netfs_end_io_direct(inode);
2961 	} else {
2962 		rc = netfs_start_io_read(inode);
2963 		if (rc < 0)
2964 			goto out;
2965 		rc = -EACCES;
2966 		down_read(&cinode->lock_sem);
2967 		if (!cifs_find_lock_conflict(
2968 			    cfile, iocb->ki_pos, iov_iter_count(to),
2969 			    tcon->ses->server->vals->shared_lock_type,
2970 			    0, NULL, CIFS_READ_OP))
2971 			rc = filemap_read(iocb, to, 0);
2972 		up_read(&cinode->lock_sem);
2973 		netfs_end_io_read(inode);
2974 	}
2975 out:
2976 	return rc;
2977 }
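
/*
 * Decision table for the strict read path above (a summary, not new
 * logic):
 *
 *	CACHE_READ  NOPOSIXBRL  path taken
 *	no          any         netfs_unbuffered_read_iter (always server)
 *	yes         unset       netfs buffered/direct read, no brlock scan
 *	yes         set         take lock_sem, scan for conflicting
 *	                        brlocks, then locked direct/filemap read
 */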
2978 
2979 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2980 {
2981 	return netfs_page_mkwrite(vmf, NULL);
2982 }
2983 
2984 static const struct vm_operations_struct cifs_file_vm_ops = {
2985 	.fault = filemap_fault,
2986 	.map_pages = filemap_map_pages,
2987 	.page_mkwrite = cifs_page_mkwrite,
2988 };
2989 
2990 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2991 {
2992 	int xid, rc = 0;
2993 	struct inode *inode = file_inode(file);
2994 
2995 	xid = get_xid();
2996 
2997 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2998 		rc = cifs_zap_mapping(inode);
2999 	if (!rc)
3000 		rc = generic_file_mmap(file, vma);
3001 	if (!rc)
3002 		vma->vm_ops = &cifs_file_vm_ops;
3003 
3004 	free_xid(xid);
3005 	return rc;
3006 }
3007 
3008 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3009 {
3010 	int rc, xid;
3011 
3012 	xid = get_xid();
3013 
3014 	rc = cifs_revalidate_file(file);
3015 	if (rc)
3016 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3017 			 rc);
3018 	if (!rc)
3019 		rc = generic_file_mmap(file, vma);
3020 	if (!rc)
3021 		vma->vm_ops = &cifs_file_vm_ops;
3022 
3023 	free_xid(xid);
3024 	return rc;
3025 }
3026 
3027 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3028 {
3029 	struct cifsFileInfo *open_file;
3030 
3031 	spin_lock(&cifs_inode->open_file_lock);
3032 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3033 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3034 			spin_unlock(&cifs_inode->open_file_lock);
3035 			return 1;
3036 		}
3037 	}
3038 	spin_unlock(&cifs_inode->open_file_lock);
3039 	return 0;
3040 }
3041 
3042 /* We do not want to update the file size from server for inodes
3043    open for write - to avoid races with writepage extending
3044    the file - in the future we could consider allowing
3045    refreshing the inode only on increases in the file size
3046    but this is tricky to do without racing with writebehind
3047    page caching in the current Linux kernel design */
3048 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3049 			    bool from_readdir)
3050 {
3051 	if (!cifsInode)
3052 		return true;
3053 
3054 	if (is_inode_writable(cifsInode) ||
3055 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3056 		/* This inode is open for write at least once */
3057 		struct cifs_sb_info *cifs_sb;
3058 
3059 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3060 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3061 			/* since no page cache to corrupt on directio
3062 			we can change size safely */
3063 			return true;
3064 		}
3065 
3066 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3067 			return true;
3068 
3069 		return false;
3070 	} else
3071 		return true;
3072 }
3073 
3074 void cifs_oplock_break(struct work_struct *work)
3075 {
3076 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3077 						  oplock_break);
3078 	struct inode *inode = d_inode(cfile->dentry);
3079 	struct super_block *sb = inode->i_sb;
3080 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3081 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3082 	struct cifs_tcon *tcon;
3083 	struct TCP_Server_Info *server;
3084 	struct tcon_link *tlink;
3085 	int rc = 0;
3086 	bool purge_cache = false, oplock_break_cancelled;
3087 	__u64 persistent_fid, volatile_fid;
3088 	__u16 net_fid;
3089 
3090 	/*
3091 	 * Hold a reference to the superblock to prevent it and its inodes from
3092 	 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
3093 	 * may release the last reference to the sb and trigger inode eviction.
3094 	 */
3095 	cifs_sb_active(sb);
3096 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3097 			TASK_UNINTERRUPTIBLE);
3098 
3099 	tlink = cifs_sb_tlink(cifs_sb);
3100 	if (IS_ERR(tlink))
3101 		goto out;
3102 	tcon = tlink_tcon(tlink);
3103 	server = tcon->ses->server;
3104 
3105 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3106 				      cfile->oplock_epoch, &purge_cache);
3107 
3108 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3109 						cifs_has_mand_locks(cinode)) {
3110 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3111 			 inode);
3112 		cinode->oplock = 0;
3113 	}
3114 
3115 	if (inode && S_ISREG(inode->i_mode)) {
3116 		if (CIFS_CACHE_READ(cinode))
3117 			break_lease(inode, O_RDONLY);
3118 		else
3119 			break_lease(inode, O_WRONLY);
3120 		rc = filemap_fdatawrite(inode->i_mapping);
3121 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3122 			rc = filemap_fdatawait(inode->i_mapping);
3123 			mapping_set_error(inode->i_mapping, rc);
3124 			cifs_zap_mapping(inode);
3125 		}
3126 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3127 		if (CIFS_CACHE_WRITE(cinode))
3128 			goto oplock_break_ack;
3129 	}
3130 
3131 	rc = cifs_push_locks(cfile);
3132 	if (rc)
3133 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3134 
3135 oplock_break_ack:
3136 	/*
3137 	 * When an oplock break is received and there are no active file
3138 	 * handles, only cached ones, schedule the deferred close immediately
3139 	 * so that a new open will not use the cached handle.
3140 	 */
3141 
3142 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3143 		cifs_close_deferred_file(cinode);
3144 
3145 	persistent_fid = cfile->fid.persistent_fid;
3146 	volatile_fid = cfile->fid.volatile_fid;
3147 	net_fid = cfile->fid.netfid;
3148 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3149 
3150 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3151 	/*
3152 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3153 	 * an acknowledgment to be sent when the file has already been closed.
3154 	 */
3155 	spin_lock(&cinode->open_file_lock);
3156 	/* check list empty since can race with kill_sb calling tree disconnect */
3157 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3158 		spin_unlock(&cinode->open_file_lock);
3159 		rc = server->ops->oplock_response(tcon, persistent_fid,
3160 						  volatile_fid, net_fid, cinode);
3161 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3162 	} else
3163 		spin_unlock(&cinode->open_file_lock);
3164 
3165 	cifs_put_tlink(tlink);
3166 out:
3167 	cifs_done_oplock_break(cinode);
3168 	cifs_sb_deactive(sb);
3169 }
3170 
3171 static int cifs_swap_activate(struct swap_info_struct *sis,
3172 			      struct file *swap_file, sector_t *span)
3173 {
3174 	struct cifsFileInfo *cfile = swap_file->private_data;
3175 	struct inode *inode = swap_file->f_mapping->host;
3176 	unsigned long blocks;
3177 	long long isize;
3178 
3179 	cifs_dbg(FYI, "swap activate\n");
3180 
3181 	if (!swap_file->f_mapping->a_ops->swap_rw)
3182 		/* Cannot support swap */
3183 		return -EINVAL;
3184 
3185 	spin_lock(&inode->i_lock);
3186 	blocks = inode->i_blocks;
3187 	isize = inode->i_size;
3188 	spin_unlock(&inode->i_lock);
3189 	if (blocks*512 < isize) {
3190 		pr_warn("swap activate: swapfile has holes\n");
3191 		return -EINVAL;
3192 	}
3193 	*span = sis->pages;
3194 
3195 	pr_warn_once("Swap support over SMB3 is experimental\n");
3196 
3197 	/*
3198 	 * TODO: consider adding ACL (or documenting how) to prevent other
3199 	 * users (on this or other systems) from reading it
3200 	 */
3201 
3203 	/* TODO: add sk_set_memalloc(inet) or similar */
3204 
3205 	if (cfile)
3206 		cfile->swapfile = true;
3207 	/*
3208 	 * TODO: Since file already open, we can't open with DENY_ALL here
3209 	 * but we could add call to grab a byte range lock to prevent others
3210 	 * from reading or writing the file
3211 	 */
3212 
3213 	sis->flags |= SWP_FS_OPS;
3214 	return add_swap_extent(sis, 0, sis->max, 0);
3215 }
3216 
3217 static void cifs_swap_deactivate(struct file *file)
3218 {
3219 	struct cifsFileInfo *cfile = file->private_data;
3220 
3221 	cifs_dbg(FYI, "swap deactivate\n");
3222 
3223 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3224 
3225 	if (cfile)
3226 		cfile->swapfile = false;
3227 
3228 	/* do we need to unpin (or unlock) the file */
3229 }
3230 
3231 /**
3232  * cifs_swap_rw - SMB3 address space operation for swap I/O
3233  * @iocb: target I/O control block
3234  * @iter: I/O buffer
3235  *
3236  * Perform IO to the swap-file.  This is much like direct IO.
3237  */
3238 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3239 {
3240 	ssize_t ret;
3241 
3242 	if (iov_iter_rw(iter) == READ)
3243 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3244 	else
3245 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3246 	if (ret < 0)
3247 		return ret;
3248 	return 0;
3249 }
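
/*
 * Descriptive note: unlike read_iter/write_iter, the swap_rw address
 * space operation reports success as 0 rather than a byte count, which
 * is why positive returns from the netfs helpers are folded to 0 above.
 */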
3250 
3251 const struct address_space_operations cifs_addr_ops = {
3252 	.read_folio	= netfs_read_folio,
3253 	.readahead	= netfs_readahead,
3254 	.writepages	= netfs_writepages,
3255 	.dirty_folio	= netfs_dirty_folio,
3256 	.release_folio	= netfs_release_folio,
3257 	.direct_IO	= noop_direct_IO,
3258 	.invalidate_folio = netfs_invalidate_folio,
3259 	.migrate_folio	= filemap_migrate_folio,
3260 	/*
3261 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3262 	 * helper if needed
3263 	 */
3264 	.swap_activate	= cifs_swap_activate,
3265 	.swap_deactivate = cifs_swap_deactivate,
3266 	.swap_rw = cifs_swap_rw,
3267 };
3268 
3269 /*
3270  * cifs_readahead requires the server to support a buffer large enough to
3271  * contain the header plus one complete page of data.  Otherwise, we need
3272  * to leave cifs_readahead out of the address space operations.
3273  */
3274 const struct address_space_operations cifs_addr_ops_smallbuf = {
3275 	.read_folio	= netfs_read_folio,
3276 	.writepages	= netfs_writepages,
3277 	.dirty_folio	= netfs_dirty_folio,
3278 	.release_folio	= netfs_release_folio,
3279 	.invalidate_folio = netfs_invalidate_folio,
3280 	.migrate_folio	= filemap_migrate_folio,
3281 };
3282