1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
4 */
5 #include <linux/fs.h>
6 #include <linux/sunrpc/addr.h>
7 #include <linux/sunrpc/sched.h>
8 #include <linux/nfs.h>
9 #include <linux/nfs3.h>
10 #include <linux/nfs4.h>
11 #include <linux/nfs_xdr.h>
12 #include <linux/nfs_fs.h>
13 #include "nfs4_fs.h"
14 #include "nfs42.h"
15 #include "iostat.h"
16 #include "pnfs.h"
17 #include "nfs4session.h"
18 #include "internal.h"
19 #include "delegation.h"
20 #include "nfs4trace.h"
21
22 #define NFSDBG_FACILITY NFSDBG_PROC
23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
24
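/*
 * Fill in @naddr with the netid and universal-address ("addr.port_hi.port_lo")
 * form of the server backing @filep, using the well-known NFS port (2049).
 * Used to describe the COPY_NOTIFY destination to the source server.
 */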
25 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
26 {
27 struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
28 unsigned short port = 2049;
29
30 rcu_read_lock();
31 naddr->netid_len = scnprintf(naddr->netid,
32 sizeof(naddr->netid), "%s",
33 rpc_peeraddr2str(clp->cl_rpcclient,
34 RPC_DISPLAY_NETID));
35 naddr->addr_len = scnprintf(naddr->addr,
36 sizeof(naddr->addr),
37 "%s.%u.%u",
38 rpc_peeraddr2str(clp->cl_rpcclient,
39 RPC_DISPLAY_ADDR),
40 port >> 8, port & 255);
41 rcu_read_unlock();
42 }
43
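/*
 * Common worker for ALLOCATE and DEALLOCATE: select a write stateid for
 * @lock, send the compound prepared in @msg, and apply the returned
 * post-op attributes to the inode.
 */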
44 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
45 struct nfs_lock_context *lock, loff_t offset, loff_t len)
46 {
47 struct inode *inode = file_inode(filep);
48 struct nfs_server *server = NFS_SERVER(inode);
49 u32 bitmask[3];
50 struct nfs42_falloc_args args = {
51 .falloc_fh = NFS_FH(inode),
52 .falloc_offset = offset,
53 .falloc_length = len,
54 .falloc_bitmask = bitmask,
55 };
56 struct nfs42_falloc_res res = {
57 .falloc_server = server,
58 };
59 int status;
60
61 msg->rpc_argp = &args;
62 msg->rpc_resp = &res;
63
64 status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
65 lock, FMODE_WRITE);
66 if (status) {
67 if (status == -EAGAIN)
68 status = -NFS4ERR_BAD_STATEID;
69 return status;
70 }
71
72 memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
73 if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
74 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
75
76 res.falloc_fattr = nfs_alloc_fattr();
77 if (!res.falloc_fattr)
78 return -ENOMEM;
79
80 status = nfs4_call_sync(server->client, server, msg,
81 &args.seq_args, &res.seq_res, 0);
82 if (status == 0)
83 status = nfs_post_op_update_inode_force_wcc(inode,
84 res.falloc_fattr);
85
86 kfree(res.falloc_fattr);
87 return status;
88 }
89
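/*
 * Wrap _nfs42_proc_fallocate() with a lock context, a flush of dirty data,
 * and the usual nfs4_handle_exception() retry loop.  A server -ENOTSUPP
 * reply is converted to -EOPNOTSUPP for the caller.
 */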
90 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
91 loff_t offset, loff_t len)
92 {
93 struct inode *inode = file_inode(filep);
94 struct nfs_server *server = NFS_SERVER(inode);
95 struct nfs4_exception exception = { };
96 struct nfs_lock_context *lock;
97 int err;
98
99 lock = nfs_get_lock_context(nfs_file_open_context(filep));
100 if (IS_ERR(lock))
101 return PTR_ERR(lock);
102
103 exception.inode = inode;
104 exception.state = lock->open_context->state;
105
106 err = nfs_sync_inode(inode);
107 if (err)
108 goto out;
109
110 do {
111 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
112 if (err == -ENOTSUPP) {
113 err = -EOPNOTSUPP;
114 break;
115 }
116 err = nfs4_handle_exception(server, err, &exception);
117 } while (exception.retry);
118 out:
119 nfs_put_lock_context(lock);
120 return err;
121 }
122
123 int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
124 {
125 struct rpc_message msg = {
126 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
127 };
128 struct inode *inode = file_inode(filep);
129 int err;
130
131 if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
132 return -EOPNOTSUPP;
133
134 inode_lock(inode);
135
136 err = nfs42_proc_fallocate(&msg, filep, offset, len);
137 if (err == -EOPNOTSUPP)
138 NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
139
140 inode_unlock(inode);
141 return err;
142 }
143
144 int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
145 {
146 struct rpc_message msg = {
147 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
148 };
149 struct inode *inode = file_inode(filep);
150 int err;
151
152 if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
153 return -EOPNOTSUPP;
154
155 inode_lock(inode);
156
157 err = nfs42_proc_fallocate(&msg, filep, offset, len);
158 if (err == 0)
159 truncate_pagecache_range(inode, offset, (offset + len) - 1);
160 if (err == -EOPNOTSUPP)
161 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
162
163 inode_unlock(inode);
164 return err;
165 }
166
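/*
 * Wait for the CB_OFFLOAD callback that completes an asynchronous COPY.
 * If the callback has already arrived, its result is waiting on the
 * client's pending_cb_stateids list; otherwise queue an nfs4_copy_state
 * on the destination (and, for inter-server copies, the source) server
 * and sleep on its completion.  On a signal, or on errors that require
 * restarting the copy, cancel the offload.
 */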
167 static int handle_async_copy(struct nfs42_copy_res *res,
168 struct nfs_server *dst_server,
169 struct nfs_server *src_server,
170 struct file *src,
171 struct file *dst,
172 nfs4_stateid *src_stateid,
173 bool *restart)
174 {
175 struct nfs4_copy_state *copy, *tmp_copy;
176 int status = NFS4_OK;
177 bool found_pending = false;
178 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
179 struct nfs_open_context *src_ctx = nfs_file_open_context(src);
180
181 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
182 if (!copy)
183 return -ENOMEM;
184
185 spin_lock(&dst_server->nfs_client->cl_lock);
186 list_for_each_entry(tmp_copy,
187 &dst_server->nfs_client->pending_cb_stateids,
188 copies) {
189 if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
190 NFS4_STATEID_SIZE))
191 continue;
192 found_pending = true;
193 list_del(&tmp_copy->copies);
194 break;
195 }
196 if (found_pending) {
197 spin_unlock(&dst_server->nfs_client->cl_lock);
198 kfree(copy);
199 copy = tmp_copy;
200 goto out;
201 }
202
203 memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
204 init_completion(&copy->completion);
205 copy->parent_dst_state = dst_ctx->state;
206 copy->parent_src_state = src_ctx->state;
207
208 list_add_tail(&copy->copies, &dst_server->ss_copies);
209 spin_unlock(&dst_server->nfs_client->cl_lock);
210
211 if (dst_server != src_server) {
212 spin_lock(&src_server->nfs_client->cl_lock);
213 list_add_tail(&copy->src_copies, &src_server->ss_copies);
214 spin_unlock(&src_server->nfs_client->cl_lock);
215 }
216
217 status = wait_for_completion_interruptible(&copy->completion);
218 spin_lock(&dst_server->nfs_client->cl_lock);
219 list_del_init(&copy->copies);
220 spin_unlock(&dst_server->nfs_client->cl_lock);
221 if (dst_server != src_server) {
222 spin_lock(&src_server->nfs_client->cl_lock);
223 list_del_init(&copy->src_copies);
224 spin_unlock(&src_server->nfs_client->cl_lock);
225 }
226 if (status == -ERESTARTSYS) {
227 goto out_cancel;
228 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
229 status = -EAGAIN;
230 *restart = true;
231 goto out_cancel;
232 }
233 out:
234 res->write_res.count = copy->count;
235 memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
236 status = -copy->error;
237
238 out_free:
239 kfree(copy);
240 return status;
241 out_cancel:
242 nfs42_do_offload_cancel_async(dst, &copy->stateid);
243 if (!nfs42_files_from_same_server(src, dst))
244 nfs42_do_offload_cancel_async(src, src_stateid);
245 goto out_free;
246 }
247
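/*
 * COMMIT the range written by the COPY and compare the commit verifier
 * with the one returned in the COPY reply; a mismatch is reported as
 * -EAGAIN so the caller can redrive the copy.
 */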
248 static int process_copy_commit(struct file *dst, loff_t pos_dst,
249 struct nfs42_copy_res *res)
250 {
251 struct nfs_commitres cres;
252 int status = -ENOMEM;
253
254 cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
255 if (!cres.verf)
256 goto out;
257
258 status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
259 if (status)
260 goto out_free;
261 if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
262 &cres.verf->verifier)) {
263 dprintk("commit verf differs from copy verf\n");
264 status = -EAGAIN;
265 }
266 out_free:
267 kfree(cres.verf);
268 out:
269 return status;
270 }
271
272 /**
273 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
274 * @inode: pointer to destination inode
275 * @pos: destination offset
276 * @len: copy length
277 *
278 * Punch a hole in the inode page cache, so that the NFS client will
279 * know to retrieve new data.
280 * Update the file size if necessary, and then mark the inode as having
281 * invalid cached values for change attribute, ctime, mtime and space used.
282 */
283 static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
284 {
285 loff_t newsize = pos + len;
286 loff_t end = newsize - 1;
287
288 WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
289 pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
290
291 spin_lock(&inode->i_lock);
292 if (newsize > i_size_read(inode))
293 i_size_write(inode, newsize);
294 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
295 NFS_INO_INVALID_CTIME |
296 NFS_INO_INVALID_MTIME |
297 NFS_INO_INVALID_BLOCKS);
298 spin_unlock(&inode->i_lock);
299 }
300
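/*
 * Issue a single COPY compound.  The source range and the destination
 * inode are flushed first; @nss and @cnr_stateid, when set, identify the
 * source server and COPY_NOTIFY stateid for an inter-server copy.
 * Asynchronous copies are waited for and uncommitted data is committed,
 * after which the destination's size and page cache are updated.
 * Returns the number of bytes copied or a negative error.
 */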
301 static ssize_t _nfs42_proc_copy(struct file *src,
302 struct nfs_lock_context *src_lock,
303 struct file *dst,
304 struct nfs_lock_context *dst_lock,
305 struct nfs42_copy_args *args,
306 struct nfs42_copy_res *res,
307 struct nl4_server *nss,
308 nfs4_stateid *cnr_stateid,
309 bool *restart)
310 {
311 struct rpc_message msg = {
312 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
313 .rpc_argp = args,
314 .rpc_resp = res,
315 };
316 struct inode *dst_inode = file_inode(dst);
317 struct inode *src_inode = file_inode(src);
318 struct nfs_server *dst_server = NFS_SERVER(dst_inode);
319 struct nfs_server *src_server = NFS_SERVER(src_inode);
320 loff_t pos_src = args->src_pos;
321 loff_t pos_dst = args->dst_pos;
322 size_t count = args->count;
323 ssize_t status;
324
325 if (nss) {
326 args->cp_src = nss;
327 nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
328 } else {
329 status = nfs4_set_rw_stateid(&args->src_stateid,
330 src_lock->open_context, src_lock, FMODE_READ);
331 if (status) {
332 if (status == -EAGAIN)
333 status = -NFS4ERR_BAD_STATEID;
334 return status;
335 }
336 }
337 status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
338 pos_src, pos_src + (loff_t)count - 1);
339 if (status)
340 return status;
341
342 status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
343 dst_lock, FMODE_WRITE);
344 if (status) {
345 if (status == -EAGAIN)
346 status = -NFS4ERR_BAD_STATEID;
347 return status;
348 }
349
350 status = nfs_sync_inode(dst_inode);
351 if (status)
352 return status;
353
354 res->commit_res.verf = NULL;
355 if (args->sync) {
356 res->commit_res.verf =
357 kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
358 if (!res->commit_res.verf)
359 return -ENOMEM;
360 }
361 set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
362 &src_lock->open_context->state->flags);
363 set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
364 &dst_lock->open_context->state->flags);
365
366 status = nfs4_call_sync(dst_server->client, dst_server, &msg,
367 &args->seq_args, &res->seq_res, 0);
368 if (status == -ENOTSUPP)
369 dst_server->caps &= ~NFS_CAP_COPY;
370 if (status)
371 goto out;
372
373 if (args->sync &&
374 nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
375 &res->commit_res.verf->verifier)) {
376 status = -EAGAIN;
377 goto out;
378 }
379
380 if (!res->synchronous) {
381 status = handle_async_copy(res, dst_server, src_server, src,
382 dst, &args->src_stateid, restart);
383 if (status)
384 goto out;
385 }
386
387 if ((!res->synchronous || !args->sync) &&
388 res->write_res.verifier.committed != NFS_FILE_SYNC) {
389 status = process_copy_commit(dst, pos_dst, res);
390 if (status)
391 goto out;
392 }
393
394 nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
395 nfs_invalidate_atime(src_inode);
396 status = res->write_res.count;
397 out:
398 if (args->sync)
399 kfree(res->commit_res.verf);
400 return status;
401 }
402
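/*
 * Copy @count bytes from @src at @pos_src to @dst at @pos_dst using the
 * NFSv4.2 COPY operation, retrying around the standard exception
 * handling for both the source and the destination state.
 */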
403 ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
404 struct file *dst, loff_t pos_dst, size_t count,
405 struct nl4_server *nss,
406 nfs4_stateid *cnr_stateid, bool sync)
407 {
408 struct nfs_server *server = NFS_SERVER(file_inode(dst));
409 struct nfs_lock_context *src_lock;
410 struct nfs_lock_context *dst_lock;
411 struct nfs42_copy_args args = {
412 .src_fh = NFS_FH(file_inode(src)),
413 .src_pos = pos_src,
414 .dst_fh = NFS_FH(file_inode(dst)),
415 .dst_pos = pos_dst,
416 .count = count,
417 .sync = sync,
418 };
419 struct nfs42_copy_res res;
420 struct nfs4_exception src_exception = {
421 .inode = file_inode(src),
422 .stateid = &args.src_stateid,
423 };
424 struct nfs4_exception dst_exception = {
425 .inode = file_inode(dst),
426 .stateid = &args.dst_stateid,
427 };
428 ssize_t err, err2;
429 bool restart = false;
430
431 src_lock = nfs_get_lock_context(nfs_file_open_context(src));
432 if (IS_ERR(src_lock))
433 return PTR_ERR(src_lock);
434
435 src_exception.state = src_lock->open_context->state;
436
437 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
438 if (IS_ERR(dst_lock)) {
439 err = PTR_ERR(dst_lock);
440 goto out_put_src_lock;
441 }
442
443 dst_exception.state = dst_lock->open_context->state;
444
445 do {
446 inode_lock(file_inode(dst));
447 err = _nfs42_proc_copy(src, src_lock,
448 dst, dst_lock,
449 &args, &res,
450 nss, cnr_stateid, &restart);
451 inode_unlock(file_inode(dst));
452
453 if (err >= 0)
454 break;
455 if (err == -ENOTSUPP &&
456 nfs42_files_from_same_server(src, dst)) {
457 err = -EOPNOTSUPP;
458 break;
459 } else if (err == -EAGAIN) {
460 if (!restart) {
461 dst_exception.retry = 1;
462 continue;
463 }
464 break;
465 } else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
466 args.sync != res.synchronous) {
467 args.sync = res.synchronous;
468 dst_exception.retry = 1;
469 continue;
470 } else if ((err == -ESTALE ||
471 err == -NFS4ERR_OFFLOAD_DENIED ||
472 err == -ENOTSUPP) &&
473 !nfs42_files_from_same_server(src, dst)) {
474 nfs42_do_offload_cancel_async(src, &args.src_stateid);
475 err = -EOPNOTSUPP;
476 break;
477 }
478
479 err2 = nfs4_handle_exception(server, err, &src_exception);
480 err = nfs4_handle_exception(server, err, &dst_exception);
481 if (!err)
482 err = err2;
483 } while (src_exception.retry || dst_exception.retry);
484
485 nfs_put_lock_context(dst_lock);
486 out_put_src_lock:
487 nfs_put_lock_context(src_lock);
488 return err;
489 }
490
491 struct nfs42_offloadcancel_data {
492 struct nfs_server *seq_server;
493 struct nfs42_offload_status_args args;
494 struct nfs42_offload_status_res res;
495 };
496
497 static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
498 {
499 struct nfs42_offloadcancel_data *data = calldata;
500
501 nfs4_setup_sequence(data->seq_server->nfs_client,
502 &data->args.osa_seq_args,
503 &data->res.osr_seq_res, task);
504 }
505
506 static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
507 {
508 struct nfs42_offloadcancel_data *data = calldata;
509
510 nfs41_sequence_done(task, &data->res.osr_seq_res);
511 if (task->tk_status &&
512 nfs4_async_handle_error(task, data->seq_server, NULL,
513 NULL) == -EAGAIN)
514 rpc_restart_call_prepare(task);
515 }
516
517 static void nfs42_free_offloadcancel_data(void *data)
518 {
519 kfree(data);
520 }
521
522 static const struct rpc_call_ops nfs42_offload_cancel_ops = {
523 .rpc_call_prepare = nfs42_offload_cancel_prepare,
524 .rpc_call_done = nfs42_offload_cancel_done,
525 .rpc_release = nfs42_free_offloadcancel_data,
526 };
527
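/*
 * Send OFFLOAD_CANCEL for @stateid to the destination server and wait
 * for the RPC to complete.  NFS_CAP_OFFLOAD_CANCEL is cleared if the
 * server does not support the operation.
 */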
528 static int nfs42_do_offload_cancel_async(struct file *dst,
529 nfs4_stateid *stateid)
530 {
531 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
532 struct nfs42_offloadcancel_data *data = NULL;
533 struct nfs_open_context *ctx = nfs_file_open_context(dst);
534 struct rpc_task *task;
535 struct rpc_message msg = {
536 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
537 .rpc_cred = ctx->cred,
538 };
539 struct rpc_task_setup task_setup_data = {
540 .rpc_client = dst_server->client,
541 .rpc_message = &msg,
542 .callback_ops = &nfs42_offload_cancel_ops,
543 .workqueue = nfsiod_workqueue,
544 .flags = RPC_TASK_ASYNC,
545 };
546 int status;
547
548 if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
549 return -EOPNOTSUPP;
550
551 data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
552 if (data == NULL)
553 return -ENOMEM;
554
555 data->seq_server = dst_server;
556 data->args.osa_src_fh = NFS_FH(file_inode(dst));
557 memcpy(&data->args.osa_stateid, stateid,
558 sizeof(data->args.osa_stateid));
559 msg.rpc_argp = &data->args;
560 msg.rpc_resp = &data->res;
561 task_setup_data.callback_data = data;
562 nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
563 1, 0);
564 task = rpc_run_task(&task_setup_data);
565 if (IS_ERR(task))
566 return PTR_ERR(task);
567 status = rpc_wait_for_completion_task(task);
568 if (status == -ENOTSUPP)
569 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
570 rpc_put_task(task);
571 return status;
572 }
573
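/*
 * Issue a single COPY_NOTIFY to the source server, using a read stateid
 * for @src and the address of @dst's server as the copy destination.
 */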
574 static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
575 struct nfs42_copy_notify_args *args,
576 struct nfs42_copy_notify_res *res)
577 {
578 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
579 struct rpc_message msg = {
580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
581 .rpc_argp = args,
582 .rpc_resp = res,
583 };
584 int status;
585 struct nfs_open_context *ctx;
586 struct nfs_lock_context *l_ctx;
587
588 ctx = get_nfs_open_context(nfs_file_open_context(src));
589 l_ctx = nfs_get_lock_context(ctx);
590 if (IS_ERR(l_ctx)) {
591 status = PTR_ERR(l_ctx);
592 goto out;
593 }
594
595 status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
596 FMODE_READ);
597 nfs_put_lock_context(l_ctx);
598 if (status) {
599 if (status == -EAGAIN)
600 status = -NFS4ERR_BAD_STATEID;
601 goto out;
602 }
603
604 status = nfs4_call_sync(src_server->client, src_server, &msg,
605 &args->cna_seq_args, &res->cnr_seq_res, 0);
606 if (status == -ENOTSUPP)
607 src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
608
609 out:
610 put_nfs_open_context(nfs_file_open_context(src));
611 return status;
612 }
613
614 int nfs42_proc_copy_notify(struct file *src, struct file *dst,
615 struct nfs42_copy_notify_res *res)
616 {
617 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
618 struct nfs42_copy_notify_args *args;
619 struct nfs4_exception exception = {
620 .inode = file_inode(src),
621 };
622 int status;
623
624 if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
625 return -EOPNOTSUPP;
626
627 args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
628 if (args == NULL)
629 return -ENOMEM;
630
631 args->cna_src_fh = NFS_FH(file_inode(src));
632 args->cna_dst.nl4_type = NL4_NETADDR;
633 nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
634 exception.stateid = &args->cna_src_stateid;
635
636 do {
637 status = _nfs42_proc_copy_notify(src, dst, args, res);
638 if (status == -ENOTSUPP) {
639 status = -EOPNOTSUPP;
640 goto out;
641 }
642 status = nfs4_handle_exception(src_server, status, &exception);
643 } while (exception.retry);
644
645 out:
646 kfree(args);
647 return status;
648 }
649
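/*
 * Implement SEEK_HOLE/SEEK_DATA via the NFSv4.2 SEEK operation: flush
 * dirty pages from @offset onwards, ask the server for the next hole or
 * data region, and update the file position with vfs_setpos().
 */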
650 static loff_t _nfs42_proc_llseek(struct file *filep,
651 struct nfs_lock_context *lock, loff_t offset, int whence)
652 {
653 struct inode *inode = file_inode(filep);
654 struct nfs42_seek_args args = {
655 .sa_fh = NFS_FH(inode),
656 .sa_offset = offset,
657 .sa_what = (whence == SEEK_HOLE) ?
658 NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
659 };
660 struct nfs42_seek_res res;
661 struct rpc_message msg = {
662 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
663 .rpc_argp = &args,
664 .rpc_resp = &res,
665 };
666 struct nfs_server *server = NFS_SERVER(inode);
667 int status;
668
669 if (!nfs_server_capable(inode, NFS_CAP_SEEK))
670 return -ENOTSUPP;
671
672 status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
673 lock, FMODE_READ);
674 if (status) {
675 if (status == -EAGAIN)
676 status = -NFS4ERR_BAD_STATEID;
677 return status;
678 }
679
680 status = nfs_filemap_write_and_wait_range(inode->i_mapping,
681 offset, LLONG_MAX);
682 if (status)
683 return status;
684
685 status = nfs4_call_sync(server->client, server, &msg,
686 &args.seq_args, &res.seq_res, 0);
687 if (status == -ENOTSUPP)
688 server->caps &= ~NFS_CAP_SEEK;
689 if (status)
690 return status;
691
692 if (whence == SEEK_DATA && res.sr_eof)
693 return -NFS4ERR_NXIO;
694 else
695 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
696 }
697
698 loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
699 {
700 struct nfs_server *server = NFS_SERVER(file_inode(filep));
701 struct nfs4_exception exception = { };
702 struct nfs_lock_context *lock;
703 loff_t err;
704
705 lock = nfs_get_lock_context(nfs_file_open_context(filep));
706 if (IS_ERR(lock))
707 return PTR_ERR(lock);
708
709 exception.inode = file_inode(filep);
710 exception.state = lock->open_context->state;
711
712 do {
713 err = _nfs42_proc_llseek(filep, lock, offset, whence);
714 if (err >= 0)
715 break;
716 if (err == -ENOTSUPP) {
717 err = -EOPNOTSUPP;
718 break;
719 }
720 err = nfs4_handle_exception(server, err, &exception);
721 } while (exception.retry);
722
723 nfs_put_lock_context(lock);
724 return err;
725 }
726
727
728 static void
729 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
730 {
731 struct nfs42_layoutstat_data *data = calldata;
732 struct inode *inode = data->inode;
733 struct nfs_server *server = NFS_SERVER(inode);
734 struct pnfs_layout_hdr *lo;
735
736 spin_lock(&inode->i_lock);
737 lo = NFS_I(inode)->layout;
738 if (!pnfs_layout_is_valid(lo)) {
739 spin_unlock(&inode->i_lock);
740 rpc_exit(task, 0);
741 return;
742 }
743 nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
744 spin_unlock(&inode->i_lock);
745 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
746 &data->res.seq_res, task);
747 }
748
749 static void
750 nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
751 {
752 struct nfs42_layoutstat_data *data = calldata;
753 struct inode *inode = data->inode;
754 struct pnfs_layout_hdr *lo;
755
756 if (!nfs4_sequence_done(task, &data->res.seq_res))
757 return;
758
759 switch (task->tk_status) {
760 case 0:
761 return;
762 case -NFS4ERR_BADHANDLE:
763 case -ESTALE:
764 pnfs_destroy_layout(NFS_I(inode));
765 break;
766 case -NFS4ERR_EXPIRED:
767 case -NFS4ERR_ADMIN_REVOKED:
768 case -NFS4ERR_DELEG_REVOKED:
769 case -NFS4ERR_STALE_STATEID:
770 case -NFS4ERR_BAD_STATEID:
771 spin_lock(&inode->i_lock);
772 lo = NFS_I(inode)->layout;
773 if (pnfs_layout_is_valid(lo) &&
774 nfs4_stateid_match(&data->args.stateid,
775 &lo->plh_stateid)) {
776 LIST_HEAD(head);
777
778 /*
779 * Mark the bad layout state as invalid, then retry
780 * with the current stateid.
781 */
782 pnfs_mark_layout_stateid_invalid(lo, &head);
783 spin_unlock(&inode->i_lock);
784 pnfs_free_lseg_list(&head);
785 nfs_commit_inode(inode, 0);
786 } else
787 spin_unlock(&inode->i_lock);
788 break;
789 case -NFS4ERR_OLD_STATEID:
790 spin_lock(&inode->i_lock);
791 lo = NFS_I(inode)->layout;
792 if (pnfs_layout_is_valid(lo) &&
793 nfs4_stateid_match_other(&data->args.stateid,
794 &lo->plh_stateid)) {
795 /* Do we need to delay before resending? */
796 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
797 &data->args.stateid))
798 rpc_delay(task, HZ);
799 rpc_restart_call_prepare(task);
800 }
801 spin_unlock(&inode->i_lock);
802 break;
803 case -ENOTSUPP:
804 case -EOPNOTSUPP:
805 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
806 }
807
808 trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
809 }
810
811 static void
812 nfs42_layoutstat_release(void *calldata)
813 {
814 struct nfs42_layoutstat_data *data = calldata;
815 struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
816 int i;
817
818 for (i = 0; i < data->args.num_dev; i++) {
819 if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
820 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
821 }
822
823 pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
824 smp_mb__before_atomic();
825 clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
826 smp_mb__after_atomic();
827 nfs_iput_and_deactive(data->inode);
828 kfree(data->args.devinfo);
829 kfree(data);
830 }
831
832 static const struct rpc_call_ops nfs42_layoutstat_ops = {
833 .rpc_call_prepare = nfs42_layoutstat_prepare,
834 .rpc_call_done = nfs42_layoutstat_done,
835 .rpc_release = nfs42_layoutstat_release,
836 };
837
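/*
 * Fire off an asynchronous LAYOUTSTATS RPC for @data.  The inode is
 * pinned with nfs_igrab_and_active() and released, together with the
 * layout header and the per-device private data, from the rpc_release
 * callback.
 */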
838 int nfs42_proc_layoutstats_generic(struct nfs_server *server,
839 struct nfs42_layoutstat_data *data)
840 {
841 struct rpc_message msg = {
842 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
843 .rpc_argp = &data->args,
844 .rpc_resp = &data->res,
845 };
846 struct rpc_task_setup task_setup = {
847 .rpc_client = server->client,
848 .rpc_message = &msg,
849 .callback_ops = &nfs42_layoutstat_ops,
850 .callback_data = data,
851 .flags = RPC_TASK_ASYNC,
852 };
853 struct rpc_task *task;
854
855 data->inode = nfs_igrab_and_active(data->args.inode);
856 if (!data->inode) {
857 nfs42_layoutstat_release(data);
858 return -EAGAIN;
859 }
860 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
861 task = rpc_run_task(&task_setup);
862 if (IS_ERR(task))
863 return PTR_ERR(task);
864 rpc_put_task(task);
865 return 0;
866 }
867
868 static struct nfs42_layouterror_data *
869 nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
870 {
871 struct nfs42_layouterror_data *data;
872 struct inode *inode = lseg->pls_layout->plh_inode;
873
874 data = kzalloc(sizeof(*data), gfp_flags);
875 if (data) {
876 data->args.inode = data->inode = nfs_igrab_and_active(inode);
877 if (data->inode) {
878 data->lseg = pnfs_get_lseg(lseg);
879 if (data->lseg)
880 return data;
881 nfs_iput_and_deactive(data->inode);
882 }
883 kfree(data);
884 }
885 return NULL;
886 }
887
888 static void
889 nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
890 {
891 pnfs_put_lseg(data->lseg);
892 nfs_iput_and_deactive(data->inode);
893 kfree(data);
894 }
895
896 static void
897 nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
898 {
899 struct nfs42_layouterror_data *data = calldata;
900 struct inode *inode = data->inode;
901 struct nfs_server *server = NFS_SERVER(inode);
902 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
903 unsigned i;
904
905 spin_lock(&inode->i_lock);
906 if (!pnfs_layout_is_valid(lo)) {
907 spin_unlock(&inode->i_lock);
908 rpc_exit(task, 0);
909 return;
910 }
911 for (i = 0; i < data->args.num_errors; i++)
912 nfs4_stateid_copy(&data->args.errors[i].stateid,
913 &lo->plh_stateid);
914 spin_unlock(&inode->i_lock);
915 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
916 &data->res.seq_res, task);
917 }
918
919 static void
920 nfs42_layouterror_done(struct rpc_task *task, void *calldata)
921 {
922 struct nfs42_layouterror_data *data = calldata;
923 struct inode *inode = data->inode;
924 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
925
926 if (!nfs4_sequence_done(task, &data->res.seq_res))
927 return;
928
929 switch (task->tk_status) {
930 case 0:
931 return;
932 case -NFS4ERR_BADHANDLE:
933 case -ESTALE:
934 pnfs_destroy_layout(NFS_I(inode));
935 break;
936 case -NFS4ERR_EXPIRED:
937 case -NFS4ERR_ADMIN_REVOKED:
938 case -NFS4ERR_DELEG_REVOKED:
939 case -NFS4ERR_STALE_STATEID:
940 case -NFS4ERR_BAD_STATEID:
941 spin_lock(&inode->i_lock);
942 if (pnfs_layout_is_valid(lo) &&
943 nfs4_stateid_match(&data->args.errors[0].stateid,
944 &lo->plh_stateid)) {
945 LIST_HEAD(head);
946
947 /*
948 * Mark the bad layout state as invalid, then retry
949 * with the current stateid.
950 */
951 pnfs_mark_layout_stateid_invalid(lo, &head);
952 spin_unlock(&inode->i_lock);
953 pnfs_free_lseg_list(&head);
954 nfs_commit_inode(inode, 0);
955 } else
956 spin_unlock(&inode->i_lock);
957 break;
958 case -NFS4ERR_OLD_STATEID:
959 spin_lock(&inode->i_lock);
960 if (pnfs_layout_is_valid(lo) &&
961 nfs4_stateid_match_other(&data->args.errors[0].stateid,
962 &lo->plh_stateid)) {
963 /* Do we need to delay before resending? */
964 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
965 &data->args.errors[0].stateid))
966 rpc_delay(task, HZ);
967 rpc_restart_call_prepare(task);
968 }
969 spin_unlock(&inode->i_lock);
970 break;
971 case -ENOTSUPP:
972 case -EOPNOTSUPP:
973 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
974 }
975
976 trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
977 task->tk_status);
978 }
979
980 static void
981 nfs42_layouterror_release(void *calldata)
982 {
983 struct nfs42_layouterror_data *data = calldata;
984
985 nfs42_free_layouterror_data(data);
986 }
987
988 static const struct rpc_call_ops nfs42_layouterror_ops = {
989 .rpc_call_prepare = nfs42_layouterror_prepare,
990 .rpc_call_done = nfs42_layouterror_done,
991 .rpc_release = nfs42_layouterror_release,
992 };
993
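/*
 * Report up to NFS42_LAYOUTERROR_MAX I/O errors against @lseg's layout
 * with an asynchronous LAYOUTERROR RPC.  The layout stateid is copied
 * into each error at rpc_call_prepare time so that it is current when
 * the call is sent.
 */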
994 int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
995 const struct nfs42_layout_error *errors, size_t n)
996 {
997 struct inode *inode = lseg->pls_layout->plh_inode;
998 struct nfs42_layouterror_data *data;
999 struct rpc_task *task;
1000 struct rpc_message msg = {
1001 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
1002 };
1003 struct rpc_task_setup task_setup = {
1004 .rpc_message = &msg,
1005 .callback_ops = &nfs42_layouterror_ops,
1006 .flags = RPC_TASK_ASYNC,
1007 };
1008 unsigned int i;
1009
1010 if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
1011 return -EOPNOTSUPP;
1012 if (n > NFS42_LAYOUTERROR_MAX)
1013 return -EINVAL;
1014 data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
1015 if (!data)
1016 return -ENOMEM;
1017 for (i = 0; i < n; i++) {
1018 data->args.errors[i] = errors[i];
1019 data->args.num_errors++;
1020 data->res.num_errors++;
1021 }
1022 msg.rpc_argp = &data->args;
1023 msg.rpc_resp = &data->res;
1024 task_setup.callback_data = data;
1025 task_setup.rpc_client = NFS_SERVER(inode)->client;
1026 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
1027 task = rpc_run_task(&task_setup);
1028 if (IS_ERR(task))
1029 return PTR_ERR(task);
1030 rpc_put_task(task);
1031 return 0;
1032 }
1033 EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
1034
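/*
 * Issue a single CLONE compound.  A @count of zero means "clone to the
 * end of the source file"; in that case the destination size from the
 * post-op attributes is used when invalidating the destination's page
 * cache.
 */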
1035 static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
1036 struct file *dst_f, struct nfs_lock_context *src_lock,
1037 struct nfs_lock_context *dst_lock, loff_t src_offset,
1038 loff_t dst_offset, loff_t count)
1039 {
1040 struct inode *src_inode = file_inode(src_f);
1041 struct inode *dst_inode = file_inode(dst_f);
1042 struct nfs_server *server = NFS_SERVER(dst_inode);
1043 struct nfs42_clone_args args = {
1044 .src_fh = NFS_FH(src_inode),
1045 .dst_fh = NFS_FH(dst_inode),
1046 .src_offset = src_offset,
1047 .dst_offset = dst_offset,
1048 .count = count,
1049 .dst_bitmask = server->cache_consistency_bitmask,
1050 };
1051 struct nfs42_clone_res res = {
1052 .server = server,
1053 };
1054 int status;
1055
1056 msg->rpc_argp = &args;
1057 msg->rpc_resp = &res;
1058
1059 status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
1060 src_lock, FMODE_READ);
1061 if (status) {
1062 if (status == -EAGAIN)
1063 status = -NFS4ERR_BAD_STATEID;
1064 return status;
1065 }
1066 status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
1067 dst_lock, FMODE_WRITE);
1068 if (status) {
1069 if (status == -EAGAIN)
1070 status = -NFS4ERR_BAD_STATEID;
1071 return status;
1072 }
1073
1074 res.dst_fattr = nfs_alloc_fattr();
1075 if (!res.dst_fattr)
1076 return -ENOMEM;
1077
1078 status = nfs4_call_sync(server->client, server, msg,
1079 &args.seq_args, &res.seq_res, 0);
1080 if (status == 0) {
1081 /* a zero-length count means clone to EOF in src */
1082 if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
1083 count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
1084 nfs42_copy_dest_done(dst_inode, dst_offset, count);
1085 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
1086 }
1087
1088 kfree(res.dst_fattr);
1089 return status;
1090 }
1091
1092 int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
1093 loff_t src_offset, loff_t dst_offset, loff_t count)
1094 {
1095 struct rpc_message msg = {
1096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
1097 };
1098 struct inode *inode = file_inode(src_f);
1099 struct nfs_server *server = NFS_SERVER(file_inode(src_f));
1100 struct nfs_lock_context *src_lock;
1101 struct nfs_lock_context *dst_lock;
1102 struct nfs4_exception src_exception = { };
1103 struct nfs4_exception dst_exception = { };
1104 int err, err2;
1105
1106 if (!nfs_server_capable(inode, NFS_CAP_CLONE))
1107 return -EOPNOTSUPP;
1108
1109 src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
1110 if (IS_ERR(src_lock))
1111 return PTR_ERR(src_lock);
1112
1113 src_exception.inode = file_inode(src_f);
1114 src_exception.state = src_lock->open_context->state;
1115
1116 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
1117 if (IS_ERR(dst_lock)) {
1118 err = PTR_ERR(dst_lock);
1119 goto out_put_src_lock;
1120 }
1121
1122 dst_exception.inode = file_inode(dst_f);
1123 dst_exception.state = dst_lock->open_context->state;
1124
1125 do {
1126 err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
1127 src_offset, dst_offset, count);
1128 if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
1129 NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
1130 err = -EOPNOTSUPP;
1131 break;
1132 }
1133
1134 err2 = nfs4_handle_exception(server, err, &src_exception);
1135 err = nfs4_handle_exception(server, err, &dst_exception);
1136 if (!err)
1137 err = err2;
1138 } while (src_exception.retry || dst_exception.retry);
1139
1140 nfs_put_lock_context(dst_lock);
1141 out_put_src_lock:
1142 nfs_put_lock_context(src_lock);
1143 return err;
1144 }
1145
1146 #define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
1147
1148 static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
1149 {
1150 struct nfs_server *server = NFS_SERVER(inode);
1151 struct nfs42_removexattrargs args = {
1152 .fh = NFS_FH(inode),
1153 .xattr_name = name,
1154 };
1155 struct nfs42_removexattrres res;
1156 struct rpc_message msg = {
1157 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
1158 .rpc_argp = &args,
1159 .rpc_resp = &res,
1160 };
1161 int ret;
1162 unsigned long timestamp = jiffies;
1163
1164 ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
1165 &res.seq_res, 1);
1166 if (!ret)
1167 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1168
1169 return ret;
1170 }
1171
1172 static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
1173 const void *buf, size_t buflen, int flags)
1174 {
1175 struct nfs_server *server = NFS_SERVER(inode);
1176 struct page *pages[NFS4XATTR_MAXPAGES];
1177 struct nfs42_setxattrargs arg = {
1178 .fh = NFS_FH(inode),
1179 .xattr_pages = pages,
1180 .xattr_len = buflen,
1181 .xattr_name = name,
1182 .xattr_flags = flags,
1183 };
1184 struct nfs42_setxattrres res;
1185 struct rpc_message msg = {
1186 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
1187 .rpc_argp = &arg,
1188 .rpc_resp = &res,
1189 };
1190 int ret, np;
1191 unsigned long timestamp = jiffies;
1192
1193 if (buflen > server->sxasize)
1194 return -ERANGE;
1195
1196 if (buflen > 0) {
1197 np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
1198 if (np < 0)
1199 return np;
1200 } else
1201 np = 0;
1202
1203 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1204 &res.seq_res, 1);
1205
1206 for (; np > 0; np--)
1207 put_page(pages[np - 1]);
1208
1209 if (!ret)
1210 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1211
1212 return ret;
1213 }
1214
1215 static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
1216 void *buf, size_t buflen, struct page **pages,
1217 size_t plen)
1218 {
1219 struct nfs_server *server = NFS_SERVER(inode);
1220 struct nfs42_getxattrargs arg = {
1221 .fh = NFS_FH(inode),
1222 .xattr_name = name,
1223 };
1224 struct nfs42_getxattrres res;
1225 struct rpc_message msg = {
1226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
1227 .rpc_argp = &arg,
1228 .rpc_resp = &res,
1229 };
1230 ssize_t ret;
1231
1232 arg.xattr_len = plen;
1233 arg.xattr_pages = pages;
1234
1235 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1236 &res.seq_res, 0);
1237 if (ret < 0)
1238 return ret;
1239
1240 /*
1241 * Normally, the caching is done one layer up, but for successful
1242 * RPCS, always cache the result here, even if the caller was
1243 * just querying the length, or if the reply was too big for
1244 * the caller. This avoids a second RPC in the case of the
1245 * common query-alloc-retrieve cycle for xattrs.
1246 *
1247 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
1248 */
1249
1250 nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);
1251
1252 if (buflen) {
1253 if (res.xattr_len > buflen)
1254 return -ERANGE;
1255 _copy_from_pages(buf, pages, 0, res.xattr_len);
1256 }
1257
1258 return res.xattr_len;
1259 }
1260
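/*
 * Issue a single LISTXATTRS call.  The reply page array is sized from
 * the caller's buffer length, capped at the server's maximum, and the
 * decoded names are returned in @buf along with the next cookie and the
 * EOF flag.
 */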
1261 static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
1262 size_t buflen, u64 *cookiep, bool *eofp)
1263 {
1264 struct nfs_server *server = NFS_SERVER(inode);
1265 struct page **pages;
1266 struct nfs42_listxattrsargs arg = {
1267 .fh = NFS_FH(inode),
1268 .cookie = *cookiep,
1269 };
1270 struct nfs42_listxattrsres res = {
1271 .eof = false,
1272 .xattr_buf = buf,
1273 .xattr_len = buflen,
1274 };
1275 struct rpc_message msg = {
1276 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
1277 .rpc_argp = &arg,
1278 .rpc_resp = &res,
1279 };
1280 u32 xdrlen;
1281 int ret, np, i;
1282
1283
1284 ret = -ENOMEM;
1285 res.scratch = alloc_page(GFP_KERNEL);
1286 if (!res.scratch)
1287 goto out;
1288
1289 xdrlen = nfs42_listxattr_xdrsize(buflen);
1290 if (xdrlen > server->lxasize)
1291 xdrlen = server->lxasize;
1292 np = xdrlen / PAGE_SIZE + 1;
1293
1294 pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
1295 if (!pages)
1296 goto out_free_scratch;
1297 for (i = 0; i < np; i++) {
1298 pages[i] = alloc_page(GFP_KERNEL);
1299 if (!pages[i])
1300 goto out_free_pages;
1301 }
1302
1303 arg.xattr_pages = pages;
1304 arg.count = xdrlen;
1305
1306 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1307 &res.seq_res, 0);
1308
1309 if (ret >= 0) {
1310 ret = res.copied;
1311 *cookiep = res.cookie;
1312 *eofp = res.eof;
1313 }
1314
1315 out_free_pages:
1316 while (--np >= 0) {
1317 if (pages[np])
1318 __free_page(pages[np]);
1319 }
1320 kfree(pages);
1321 out_free_scratch:
1322 __free_page(res.scratch);
1323 out:
1324 return ret;
1325
1326 }
1327
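/*
 * GETXATTR with the usual exception/retry loop.  Receive pages are
 * allocated for the page-rounded buffer length (or XATTR_SIZE_MAX when
 * the caller is only querying the length).
 */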
1328 ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
1329 void *buf, size_t buflen)
1330 {
1331 struct nfs4_exception exception = { };
1332 ssize_t err, np, i;
1333 struct page **pages;
1334
1335 np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
1336 pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
1337 if (!pages)
1338 return -ENOMEM;
1339
1340 for (i = 0; i < np; i++) {
1341 pages[i] = alloc_page(GFP_KERNEL);
1342 if (!pages[i]) {
1343 err = -ENOMEM;
1344 goto out;
1345 }
1346 }
1347
1348 /*
1349 * The GETXATTR op has no length field in the call, and the
1350 * xattr data is at the end of the reply.
1351 *
1352 * There is no downside in using the page-aligned length. It will
1353 * allow receiving and caching xattrs that are too large for the
1354 * caller but still fit in the page-rounded value.
1355 */
1356 do {
1357 err = _nfs42_proc_getxattr(inode, name, buf, buflen,
1358 pages, np * PAGE_SIZE);
1359 if (err >= 0)
1360 break;
1361 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1362 &exception);
1363 } while (exception.retry);
1364
1365 out:
1366 while (--i >= 0)
1367 __free_page(pages[i]);
1368 kfree(pages);
1369
1370 return err;
1371 }
1372
1373 int nfs42_proc_setxattr(struct inode *inode, const char *name,
1374 const void *buf, size_t buflen, int flags)
1375 {
1376 struct nfs4_exception exception = { };
1377 int err;
1378
1379 do {
1380 err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
1381 if (!err)
1382 break;
1383 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1384 &exception);
1385 } while (exception.retry);
1386
1387 return err;
1388 }
1389
1390 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
1391 size_t buflen, u64 *cookiep, bool *eofp)
1392 {
1393 struct nfs4_exception exception = { };
1394 ssize_t err;
1395
1396 do {
1397 err = _nfs42_proc_listxattrs(inode, buf, buflen,
1398 cookiep, eofp);
1399 if (err >= 0)
1400 break;
1401 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1402 &exception);
1403 } while (exception.retry);
1404
1405 return err;
1406 }
1407
1408 int nfs42_proc_removexattr(struct inode *inode, const char *name)
1409 {
1410 struct nfs4_exception exception = { };
1411 int err;
1412
1413 do {
1414 err = _nfs42_proc_removexattr(inode, name);
1415 if (!err)
1416 break;
1417 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1418 &exception);
1419 } while (exception.retry);
1420
1421 return err;
1422 }
1423