// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

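/*
 * Issue a single ALLOCATE or DEALLOCATE compound (the caller selects the
 * procedure via @msg), then refresh the inode attributes from the reply.
 */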
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_falloc_args args = {
		.falloc_fh	= NFS_FH(inode),
		.falloc_offset	= offset,
		.falloc_length	= len,
		.falloc_bitmask	= server->cache_consistency_bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status)
		return status;

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(inode, res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

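/*
 * Common fallocate path: flush dirty data first, then retry the call
 * through the standard NFSv4 exception handling loop.
 */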
static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

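/*
 * Preallocate storage for the range [offset, offset + len) with the
 * NFSv4.2 ALLOCATE operation.  Clears NFS_CAP_ALLOCATE if the server
 * does not support it.
 */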
int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

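/*
 * Punch a hole in the range [offset, offset + len) with the NFSv4.2
 * DEALLOCATE operation, then drop the affected page cache pages.
 * Clears NFS_CAP_DEALLOCATE if the server does not support it.
 */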
int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

	inode_unlock(inode);
	return err;
}

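/*
 * Wait for an asynchronous COPY to complete.  If the copy-completion
 * callback already arrived, reuse the pending copy state found on
 * pending_cb_stateids; otherwise queue a new nfs4_copy_state on
 * ss_copies and sleep until the callback (or a signal) wakes us,
 * cancelling the offload if interrupted.
 */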
static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy, &server->nfs_client->pending_cb_stateids,
				copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
				NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_state = ctx->state;

	list_add_tail(&copy->copies, &server->ss_copies);
	spin_unlock(&server->nfs_client->cl_lock);

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&server->nfs_client->cl_lock);
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags) {
		status = -EAGAIN;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	kfree(copy);
	return status;
}

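/*
 * COMMIT the copied range on the destination and compare the commit
 * verifier with the one returned by COPY; a mismatch returns -EAGAIN
 * so the caller retries.
 */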
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				    &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

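/*
 * Issue a single COPY compound.  Both stateids are taken from the
 * lock/open contexts, the source range is flushed and the destination
 * inode synced before the call, and the destination page cache is
 * invalidated afterwards.  Returns the number of bytes copied or a
 * negative error.
 */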
static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	status = nfs4_set_rw_stateid(&args->src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status)
		return status;

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(server->client, server, &msg,
				&args->seq_args, &res->seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
		nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				    &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, server, src, dst,
				&args->src_stateid);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
			res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
					pos_dst >> PAGE_SHIFT,
					(pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));

	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

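/*
 * Copy @count bytes from @src at @pos_src to @dst at @pos_dst with the
 * NFSv4.2 COPY operation.  Starts with an asynchronous copy and falls
 * back to a synchronous one if the server returns
 * NFS4ERR_OFFLOAD_NO_REQS.
 */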
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst,
			size_t count)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh		= NFS_FH(file_inode(src)),
		.src_pos	= pos_src,
		.dst_fh		= NFS_FH(file_inode(dst)),
		.dst_pos	= pos_dst,
		.count		= count,
		.sync		= false,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode		= file_inode(src),
		.stateid	= &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode		= file_inode(dst),
		.stateid	= &args.dst_stateid,
	};
	ssize_t err, err2;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				dst, dst_lock,
				&args, &res);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			dst_exception.retry = 1;
			continue;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err  = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

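/* rpc_call_ops callbacks for the asynchronous OFFLOAD_CANCEL task */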
static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
				&data->args.osa_seq_args,
				&data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
		nfs4_async_handle_error(task, data->seq_server, NULL,
			NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

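/*
 * Send OFFLOAD_CANCEL for @stateid to the destination server and wait
 * for the RPC task to complete.  Clears NFS_CAP_OFFLOAD_CANCEL if the
 * server does not support the operation.
 */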
static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
		sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

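/*
 * Issue a single SEEK call to locate the next hole or data region at or
 * after @offset, after flushing any dirty pages from @offset onwards.
 */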
static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh		= NFS_FH(inode),
		.sa_offset	= offset,
		.sa_what	= (whence == SEEK_HOLE) ?
					NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

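/*
 * SEEK_HOLE/SEEK_DATA entry point: retry the SEEK call through the
 * standard NFSv4 exception handling loop.
 */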
loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}


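/* rpc_call_ops callbacks for the asynchronous LAYOUTSTATS task */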
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
					     &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					&lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						&data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

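/*
 * Transmit a LAYOUTSTATS report for @data->args.inode as an asynchronous
 * RPC task.  The release callback drops the layout header reference,
 * clears NFS_INO_LAYOUTSTATS and frees @data.
 */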
int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

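/*
 * Allocate a layouterror request, taking references to the inode and the
 * layout segment; returns NULL if either reference cannot be obtained.
 */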
static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

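/* rpc_call_ops callbacks for the asynchronous LAYOUTERROR task */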
static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				&lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					&lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						&data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

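/*
 * Report up to NFS42_LAYOUTERROR_MAX I/O errors against @lseg to the
 * server with the NFSv4.2 LAYOUTERROR operation, sent as an asynchronous
 * RPC task.
 */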
int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
		const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

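/*
 * Issue a single CLONE compound and update the destination inode
 * attributes from the reply.
 */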
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status)
		return status;

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

	kfree(res.dst_fattr);
	return status;
}

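/*
 * Clone @count bytes from @src_f at @src_offset to @dst_f at @dst_offset
 * with the NFSv4.2 CLONE operation, retrying through the standard NFSv4
 * exception handling loop.  Clears NFS_CAP_CLONE if the server does not
 * support the operation.
 */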
int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}