// SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for the pNFS flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20

enum nfs4_ff_op_type {
	NFS4_FF_OP_LAYOUTSTATS,
	NFS4_FF_OP_LAYOUTRETURN,
};

static unsigned short io_maxretrans;

static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit, enum nfs4_ff_op_type type);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		pnfs_init_ds_commit_info(&ffl->commit_info);
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		ffl->commit_info.ops = &ff_layout_commit_ops;
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree_rcu(ffl, generic_hdr.plh_rcu);
}

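/*
 * XDR decode helpers for the flexfiles layout body. Each helper pulls a
 * fixed-size or length-prefixed chunk off the xdr_stream with
 * xdr_inline_decode(), which returns NULL once the receive buffer is
 * exhausted; the helpers map that to -ENOBUFS so the caller can abort
 * the decode cleanly.
 */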
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

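/*
 * An NFS filehandle is encoded on the wire as a variable-length XDR
 * opaque: a 4-byte length followed by up to NFS_MAXFHSIZE bytes of
 * data. For example, a 16-byte filehandle arrives as <00 00 00 10>
 * followed by the 16 bytes of handle data.
 */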
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4) */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static struct nfsd_file *
ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
		 struct nfs_fh *fh, fmode_t mode)
{
	if (mode & FMODE_WRITE) {
		/*
		 * Always request read and write access since this corresponds
		 * to a rw layout.
		 */
		mode |= FMODE_READ;
	}

	return nfs_local_open_fh(clp, cred, fh, mode);
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		refcount_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	put_cred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	put_cred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	u32 i;

	for (i = 0; i < fls->mirror_array_cnt; i++)
		ff_layout_put_mirror(fls->mirror_array[i]);
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
		struct pnfs_layout_segment *l2)
{
	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
	u32 i;

	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
		return false;
	for (i = 0; i < fl1->mirror_array_cnt; i++) {
		if (fl1->mirror_array[i] != fl2->mirror_array[i])
			return false;
	}
	return true;
}

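/*
 * Ordering predicate used when inserting layout segments into the
 * layout header: segments are ordered first by iomode and then by byte
 * range, so overlapping flexfiles segments end up adjacent and can be
 * merged by ff_lseg_merge() below.
 */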
static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

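/*
 * Two segments are mergeable only if neither is being returned, their
 * iomodes match, their byte ranges overlap, and they reference the
 * same mirror set. On success 'new' absorbs the later end offset and
 * the ROC flag from 'old', and 'old' can then be released by the
 * caller.
 */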
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;
	if (!ff_lseg_match_mirrors(new, old))
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	/* Simple in-place exchange sort: highest efficiency first */
	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

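/*
 * Decode the flexfiles layout body returned by LAYOUTGET (ff_layout4,
 * cf. RFC 8435). The wire format parsed below is roughly:
 *
 *	stripe_unit(8) | mirror_array_cnt(4)
 *	per mirror:
 *		ds_count(4) | deviceid(16) | efficiency(4) |
 *		stateid(16) | fh_count(4) | fh_count * nfs_fh |
 *		user(opaque) | group(opaque)
 *	optional trailer: flags(4) | stats collection hint(4)
 *
 * Only ds_count == 1 (no striping within a mirror) is supported.
 */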
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_page(&stream, scratch);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
			gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		gid = make_kgid(&init_user_ns, id);

		if (gfp_flags & __GFP_FS)
			kcred = prepare_kernel_cred(&init_task);
		else {
			unsigned int nofs_flags = memalloc_nofs_save();
			kcred = prepare_kernel_cred(&init_task);
			memalloc_nofs_restore(nofs_flags);
		}
		rc = -ENOMEM;
		if (!kcred)
			goto out_err_free;
		kcred->fsuid = uid;
		kcred->fsgid = gid;
		cred = RCU_INITIALIZER(kcred);

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

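/*
 * Record the start of an I/O op for layoutstats and decide whether a
 * LAYOUTSTATS report is due. The report interval defaults to
 * FF_LAYOUTSTATS_REPORT_INTERVAL and may be overridden per mirror by
 * the server's stats collection hint, or globally by the
 * layoutstats_timer tunable.
 */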
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_unavailable(devid);
}

static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_available(devid);
}

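/*
 * Scan the mirror array, starting at start_idx, for a data server this
 * client can talk to. Mirrors are sorted by efficiency, so the first
 * usable mirror is also the preferred one. With check_device set,
 * mirrors whose device has been marked unavailable are skipped.
 */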
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
			     u32 start_idx, u32 *best_idx,
			     bool check_device)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
	u32 idx;

	/* mirrors are initially sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
		if (IS_ERR(ds))
			continue;

		if (check_device &&
		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
			/* reinitialize the error state in case this is the last iteration */
			ds = ERR_PTR(-EINVAL);
			continue;
		}

		*best_idx = idx;
		break;
	}

	return ds;
}

static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
				 u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
				   u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  u32 start_idx, u32 *best_idx)
{
	struct nfs4_pnfs_ds *ds;

	/* Prefer a mirror whose device is still marked available... */
	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
	if (!IS_ERR(ds))
		return ds;
	/* ...but fall back to any connectable mirror */
	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}

static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
			  u32 *best_idx)
{
	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
					       best_idx);
	if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
		return ds;
	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg =
		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
				   req_offset(req), req->wb_bytes, IOMODE_READ,
				   strict_iomode, nfs_io_gfp_mask());
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

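/*
 * Page I/O setup for reads: grab a layout segment (any iomode first,
 * then strictly IOMODE_READ if reading from the RW segment should be
 * avoided), pick a data server, and point the single pageio mirror at
 * it. On failure, either fall back to the MDS or, if the layout
 * forbids that, retry after a short sleep.
 */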
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	u32 ds_idx;

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
retry:
	pnfs_generic_pg_check_layout(pgio, req);
	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		ff_layout_pg_get_read(pgio, req, false);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		ff_layout_pg_get_read(pgio, req, true);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	/* Reset wb_nio, since getting layout segment was successful */
	req->wb_nio = 0;

	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
	if (IS_ERR(ds)) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_generic_pg_cleanup(pgio);
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	pgio->pg_mirror_idx = ds_idx;
	return;
out_nolseg:
	if (pgio->pg_error < 0) {
		if (pgio->pg_error != -EAGAIN)
			return;
		/* Retry getting layout segment if lower layer returned -EAGAIN */
		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
				pgio->pg_error = -ETIMEDOUT;
			else
				pgio->pg_error = -EIO;
			return;
		}
		pgio->pg_error = 0;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}
out_mds:
	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_READ,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs4_pnfs_ds *ds;
	u32 i;

retry:
	pnfs_generic_pg_check_layout(pgio, req);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg =
			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
					   req_offset(req), req->wb_bytes,
					   IOMODE_RW, false, nfs_io_gfp_mask());
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
		goto out_eagain;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
		if (IS_ERR(ds)) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_generic_pg_cleanup(pgio);
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;
out_eagain:
	pnfs_generic_pg_cleanup(pgio);
	pgio->pg_error = -EAGAIN;
	return;
out_mds:
	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_write_mds(pgio);
	pgio->pg_error = -EAGAIN;
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg =
			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
					   req_offset(req), req->wb_bytes,
					   IOMODE_RW, false, nfs_io_gfp_mask());
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static u32
ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	u32 old = desc->pg_mirror_idx;

	desc->pg_mirror_idx = idx;
	return old;
}

static struct nfs_pgio_mirror *
ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	return &desc->pg_mirrors[idx];
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
	.pg_get_mirror = ff_layout_pg_get_mirror_write,
	.pg_set_mirror = ff_layout_pg_set_mirror_write,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_write_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_RW, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
	u32 idx = hdr->pgio_mirror_idx + 1;
	u32 new_idx = 0;
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
	if (IS_ERR(ds))
		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
	else
		ff_layout_send_layouterror(hdr->lseg);
	pnfs_read_resend_pnfs(hdr, new_idx);
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);
	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_read_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_READ, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

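/*
 * DS error handling. The handlers below classify a failed DS RPC into
 * one of three outcomes: -NFS4ERR_RESET_TO_PNFS (retry via another
 * mirror), -NFS4ERR_RESET_TO_MDS (resend the I/O through the metadata
 * server), or -EAGAIN (restart the same RPC, possibly after a delay).
 */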
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   u32 op_status,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (op_status) {
	case NFS4_OK:
	case NFS4ERR_NXIO:
		break;
	case NFSERR_PERM:
		if (!task->tk_xprt)
			break;
		xprt_force_disconnect(task->tk_xprt);
		goto out_retry;
	case NFS4ERR_BADSESSION:
	case NFS4ERR_BADSLOT:
	case NFS4ERR_BAD_HIGH_SLOT:
	case NFS4ERR_DEADSESSION:
	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case NFS4ERR_SEQ_FALSE_RETRY:
	case NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		goto out_retry;
	case NFS4ERR_DELAY:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		fallthrough;
	case NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		goto out_retry;
	case NFS4ERR_RETRY_UNCACHED_REP:
		goto out_retry;
	/* Invalidate Layout errors */
	case NFS4ERR_PNFS_NO_LAYOUT:
	case NFS4ERR_STALE:
	case NFS4ERR_BADHANDLE:
	case NFS4ERR_ISDIR:
	case NFS4ERR_FHEXPIRED:
	case NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	default:
		break;
	}

	switch (task->tk_status) {
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
	case -EPROTO:
	case -ENODEV:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		break;
	default:
		break;
	}

	if (ff_layout_avoid_mds_available_ds(lseg))
		return -NFS4ERR_RESET_TO_PNFS;
reset:
	dprintk("%s Retry through MDS. Error %d\n", __func__,
		task->tk_status);
	return -NFS4ERR_RESET_TO_MDS;

out_retry:
	task->tk_status = 0;
	return -EAGAIN;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   u32 op_status,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (op_status) {
	case NFS_OK:
	case NFSERR_NXIO:
		break;
	case NFSERR_PERM:
		if (!task->tk_xprt)
			break;
		xprt_force_disconnect(task->tk_xprt);
		goto out_retry;
	case NFSERR_ACCES:
	case NFSERR_BADHANDLE:
	case NFSERR_FBIG:
	case NFSERR_IO:
	case NFSERR_NOSPC:
	case NFSERR_ROFS:
	case NFSERR_STALE:
		goto out_reset_to_pnfs;
	case NFSERR_JUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		break;
	}

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
out_reset_to_pnfs:
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					u32 op_status,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					u32 idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0) {
		ff_layout_mark_ds_reachable(lseg, idx);
		return 0;
	}

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, op_status, clp,
						       lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, op_status, state,
						       clp, lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					u32 idx, u64 offset, u64 length,
					u32 *op_status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 status = *op_status;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -EINVAL:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
		case -EPROTO:
		case -ENODEV:
			*op_status = status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			*op_status = status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       nfs_io_gfp_mask());

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
	case NFS4ERR_PERM:
		break;
	case NFS4ERR_NXIO:
		ff_layout_mark_ds_unreachable(lseg, idx);
		/*
		 * Don't return the layout if this is a read and we still
		 * have layouts to try
		 */
		if (opnum == OP_READ)
			break;
		fallthrough;
	default:
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
	}

	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_READ,
					    task->tk_status);
		trace_ff_layout_read_error(hdr);
	}

	err = ff_layout_async_handle_error(task, hdr->res.op_status,
					   hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	trace_nfs4_pnfs_read(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a FILE_SYNC reply from a DS as
 * DATA_SYNC, so to follow
 * http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (!pnfs_is_valid_lseg(hdr->lseg)) {
		rpc_exit(task, -EAGAIN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_read_prepare_common(task, hdr);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
		ff_layout_resend_pnfs_read(hdr);
	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_read(hdr);
	pnfs_generic_rw_release(data);
}

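/*
 * Write completion: track any DS error, classify it via
 * ff_layout_async_handle_error(), and either flag the header for a
 * pNFS/MDS resend or record the last written byte so a LAYOUTCOMMIT
 * can be sent when the DS replied FILE_SYNC or DATA_SYNC.
 */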
ff_layout_write_done_cb(struct rpc_task * task,struct nfs_pgio_header * hdr)1536 static int ff_layout_write_done_cb(struct rpc_task *task,
1537 				struct nfs_pgio_header *hdr)
1538 {
1539 	loff_t end_offs = 0;
1540 	int err;
1541 
1542 	if (task->tk_status < 0) {
1543 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1544 					    hdr->args.offset, hdr->args.count,
1545 					    &hdr->res.op_status, OP_WRITE,
1546 					    task->tk_status);
1547 		trace_ff_layout_write_error(hdr);
1548 	}
1549 
1550 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1551 					   hdr->args.context->state,
1552 					   hdr->ds_clp, hdr->lseg,
1553 					   hdr->pgio_mirror_idx);
1554 
1555 	trace_nfs4_pnfs_write(hdr, err);
1556 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1557 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1558 	switch (err) {
1559 	case -NFS4ERR_RESET_TO_PNFS:
1560 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1561 		return task->tk_status;
1562 	case -NFS4ERR_RESET_TO_MDS:
1563 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1564 		return task->tk_status;
1565 	case -EAGAIN:
1566 		return -EAGAIN;
1567 	}
1568 
1569 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1570 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1571 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1572 
1573 	/* Note: if the write is unstable, don't set end_offs until commit */
1574 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1575 
1576 	/* zero out fattr since we don't care DS attr at all */
1577 	hdr->fattr.valid = 0;
1578 	if (task->tk_status >= 0)
1579 		nfs_writeback_update_inode(hdr);
1580 
1581 	return 0;
1582 }
1583 
ff_layout_commit_done_cb(struct rpc_task * task,struct nfs_commit_data * data)1584 static int ff_layout_commit_done_cb(struct rpc_task *task,
1585 				     struct nfs_commit_data *data)
1586 {
1587 	int err;
1588 
1589 	if (task->tk_status < 0) {
1590 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1591 					    data->args.offset, data->args.count,
1592 					    &data->res.op_status, OP_COMMIT,
1593 					    task->tk_status);
1594 		trace_ff_layout_commit_error(data);
1595 	}
1596 
1597 	err = ff_layout_async_handle_error(task, data->res.op_status,
1598 					   NULL, data->ds_clp, data->lseg,
1599 					   data->ds_commit_index);
1600 
1601 	trace_nfs4_pnfs_commit_ds(data, err);
1602 	switch (err) {
1603 	case -NFS4ERR_RESET_TO_PNFS:
1604 		pnfs_generic_prepare_to_resend_writes(data);
1605 		return -EAGAIN;
1606 	case -NFS4ERR_RESET_TO_MDS:
1607 		pnfs_generic_prepare_to_resend_writes(data);
1608 		return -EAGAIN;
1609 	case -EAGAIN:
1610 		rpc_restart_call_prepare(task);
1611 		return -EAGAIN;
1612 	}
1613 
1614 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1615 
1616 	return 0;
1617 }
1618 
ff_layout_write_record_layoutstats_start(struct rpc_task * task,struct nfs_pgio_header * hdr)1619 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1620 		struct nfs_pgio_header *hdr)
1621 {
1622 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1623 		return;
1624 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1625 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1626 			hdr->args.count,
1627 			task->tk_start);
1628 }
1629 
ff_layout_write_record_layoutstats_done(struct rpc_task * task,struct nfs_pgio_header * hdr)1630 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1631 		struct nfs_pgio_header *hdr)
1632 {
1633 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1634 		return;
1635 	nfs4_ff_layout_stat_io_end_write(task,
1636 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1637 			hdr->args.count, hdr->res.count,
1638 			hdr->res.verf->committed);
1639 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1640 }
1641 
ff_layout_write_prepare_common(struct rpc_task * task,struct nfs_pgio_header * hdr)1642 static int ff_layout_write_prepare_common(struct rpc_task *task,
1643 					  struct nfs_pgio_header *hdr)
1644 {
1645 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1646 		rpc_exit(task, -EIO);
1647 		return -EIO;
1648 	}
1649 
1650 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1651 		rpc_exit(task, -EAGAIN);
1652 		return -EAGAIN;
1653 	}
1654 
1655 	ff_layout_write_record_layoutstats_start(task, hdr);
1656 	return 0;
1657 }
1658 
static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
		ff_layout_send_layouterror(hdr->lseg);
		ff_layout_reset_write(hdr, true);
	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_write(hdr, false);
	pnfs_generic_rw_release(data);
}

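/*
 * Commit-side layoutstats mirror the write-side pair above.  A COMMIT
 * moves no data on the wire, so the start helper records a length of
 * zero; on success the done helper credits the number of bytes the
 * commit stabilized by summing wb_bytes over the request list.
 */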
static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}

static int ff_layout_commit_prepare_common(struct rpc_task *task,
					   struct nfs_commit_data *cdata)
{
	if (!pnfs_is_valid_lseg(cdata->lseg)) {
		rpc_exit(task, -EAGAIN);
		return -EAGAIN;
	}

	ff_layout_commit_record_layoutstats_start(task, cdata);
	return 0;
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	if (ff_layout_commit_prepare_common(task, data))
		return;

	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	if (nfs4_setup_sequence(cdata->ds_clp,
				&cdata->args.seq_args,
				&cdata->res.seq_res,
				task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

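/*
 * The six rpc_call_ops tables below differ only in their prepare
 * callback; the done, count_stats and release callbacks are shared
 * between the v3 and v4 flavors of each operation.
 */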
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

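/*
 * ff_layout_read_pagelist() drives a READ to one mirror: pick the data
 * server for the current mirror index, obtain a DS rpc_clnt and
 * credential, swap in the DS filehandle and stateid, then hand the
 * header to nfs_initiate_pgio().  Any setup failure falls through to
 * out_failed, where the I/O is either retried against another DS or
 * redirected to the MDS.
 */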
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfsd_file *localio;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;
	bool ds_fatal_error = false;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
	if (IS_ERR(ds)) {
		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
		goto out_failed;
	}

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(mirror);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Start IO accounting for local read */
	localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
	if (localio) {
		hdr->task.tk_start = ktime_get();
		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
	}

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN, localio);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

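/*
 * ff_local_open_fh() returns a non-NULL nfsd_file only when the data
 * server turns out to be this same host (LOCALIO).  In that case the
 * data path can short-circuit the network, so tk_start is stamped by
 * hand and the layoutstats start record is emitted here instead of
 * from the rpc_call_prepare path.  The write and commit initiators
 * below follow the same pattern.
 */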
/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfsd_file *localio;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	u32 idx = hdr->pgio_mirror_idx;
	bool ds_fatal_error = false;

	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
	if (IS_ERR(ds)) {
		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
		goto out_failed;
	}

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(mirror);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Start IO accounting for local write */
	localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
				   FMODE_READ|FMODE_WRITE);
	if (localio) {
		hdr->task.tk_start = ktime_get();
		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
	}

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN, localio);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

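/*
 * Flexfiles keeps one commit bucket per mirror, so the bucket index
 * recorded in ds_commit_idx above maps 1:1 onto the mirror array;
 * hence the identity mapping below.
 */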
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfsd_file *localio;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
		goto out_err;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
	if (IS_ERR(ds))
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	refcount_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	/* Start IO accounting for local commit */
	localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
				   FMODE_READ|FMODE_WRITE);
	if (localio) {
		data->task.tk_start = ktime_get();
		ff_layout_commit_record_layoutstats_start(&data->task, data);
	}

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN, localio);
	put_cred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

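/*
 * Task-matching helpers for ff_layout_cancel_io().  A task belongs to
 * a layout segment if its tk_ops identify it as one of this driver's
 * read/write/commit calls and its calldata points back at the lseg
 * being cancelled.
 */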
static bool ff_layout_match_rw(const struct rpc_task *task,
			       const struct nfs_pgio_header *hdr,
			       const struct pnfs_layout_segment *lseg)
{
	return hdr->lseg == lseg;
}

static bool ff_layout_match_commit(const struct rpc_task *task,
				   const struct nfs_commit_data *cdata,
				   const struct pnfs_layout_segment *lseg)
{
	return cdata->lseg == lseg;
}

static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
{
	const struct rpc_call_ops *ops = task->tk_ops;

	if (ops == &ff_layout_read_call_ops_v3 ||
	    ops == &ff_layout_read_call_ops_v4 ||
	    ops == &ff_layout_write_call_ops_v3 ||
	    ops == &ff_layout_write_call_ops_v4)
		return ff_layout_match_rw(task, task->tk_calldata, data);
	if (ops == &ff_layout_commit_call_ops_v3 ||
	    ops == &ff_layout_commit_call_ops_v4)
		return ff_layout_match_commit(task, task->tk_calldata, data);
	return false;
}

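/*
 * Cancel all in-flight I/O to every data server referenced by a layout
 * segment.  Matching tasks are failed with -EAGAIN so that the generic
 * layer can resend them, and the transport is disconnected so that
 * nothing already queued reaches a data server we no longer trust.
 */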
static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_ff_layout_ds *mirror_ds;
	struct nfs4_pnfs_ds *ds;
	struct nfs_client *ds_clp;
	struct rpc_clnt *clnt;
	u32 idx;

	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
		mirror = flseg->mirror_array[idx];
		mirror_ds = mirror->mirror_ds;
		if (IS_ERR_OR_NULL(mirror_ds))
			continue;
		ds = mirror->mirror_ds->ds;
		if (!ds)
			continue;
		ds_clp = ds->ds_clp;
		if (!ds_clp)
			continue;
		clnt = ds_clp->cl_rpcclient;
		if (!clnt)
			continue;
		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
			continue;
		rpc_clnt_disconnect(clnt);
	}
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct pnfs_commit_array *array, *new;

	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
				      nfs_io_gfp_mask());
	if (new) {
		spin_lock(&inode->i_lock);
		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
		spin_unlock(&inode->i_lock);
		if (array != new)
			pnfs_free_commit_array(new);
	}
}

static void
ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct inode *inode)
{
	spin_lock(&inode->i_lock);
	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
	spin_unlock(&inode->i_lock);
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
			devinfo->ld_private.data);
}

/* Encode the per-device iostats gathered for this layoutreturn */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
		const struct nfs4_layoutreturn_args *args,
		struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
				&args->layout->plh_stateid,
				&ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
		unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

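/*
 * LAYOUTRETURN carries an opaque, layout-type-specific body.  The
 * encoder below builds that body (ioerr list plus iostats array) into
 * a scratch page with a private xdr_stream first, because the total
 * length has to go on the wire as a prefix before the bytes
 * themselves; only then is the page spliced into the real stream with
 * xdr_write_pages().
 */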
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
		const void *voidargs,
		const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

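/*
 * Gather everything the layoutreturn encoder needs while it is still
 * safe to do so: the DS error list (capped at FF_LAYOUTRETURN_MAXERR
 * entries) and a snapshot of per-mirror iostats taken under i_lock.
 * The result is stashed in args->ld_private for layoutreturn_ops.
 */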
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(
		&ff_layout->generic_hdr, &ff_args->devinfo[0],
		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

#ifdef CONFIG_NFS_V4_2
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct nfs42_layout_error *errors;
	LIST_HEAD(head);

	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
		return;
	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
	if (list_empty(&head))
		return;

	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
			       nfs_io_gfp_mask());
	if (errors != NULL) {
		const struct nfs4_ff_layout_ds_err *pos;
		size_t n = 0;

		list_for_each_entry(pos, &head, list) {
			errors[n].offset = pos->offset;
			errors[n].length = pos->length;
			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
			errors[n].errors[0].dev_id = pos->deviceid;
			errors[n].errors[0].status = pos->status;
			errors[n].errors[0].opnum = pos->opnum;
			n++;
			if (!list_is_last(&pos->list, &head) &&
			    n < NFS42_LAYOUTERROR_MAX)
				continue;
			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
				break;
			n = 0;
		}
		kfree(errors);
	}
	ff_layout_free_ds_ioerr(&head);
}
#else
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
}
#endif

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	netid_len = strlen(da->da_netid);
	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, da->da_netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

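/*
 * Worked example: the universal address for port 2049 on 192.0.2.1, as
 * produced above, is "192.0.2.1.8.1" -- the last two dotted components
 * are the high and low bytes of the port (8 * 256 + 1 = 2049).
 */

/*
 * nfstime4 on the wire is 12 bytes: a signed 64-bit seconds field
 * followed by a 32-bit nanoseconds field, hence the single 12-byte
 * reservation below.
 */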
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free	= ff_layout_free_layoutstats,
};

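/*
 * Walk the layout's mirror list and fill in up to dev_limit devinfo
 * slots.  Each reported mirror gains a reference that is dropped by
 * ff_layout_free_layoutstats() once the stats have been transmitted.
 * For LAYOUTSTATS only mirrors with fresh data (NFS4_FF_MIRROR_STAT_AVAIL)
 * are reported; a LAYOUTRETURN reports every mirror it can.
 */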
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit, enum nfs4_ff_op_type type)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
					&mirror->flags) &&
		    type != NFS4_FF_OP_LAYOUTRETURN)
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!refcount_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct pnfs_layout_hdr *lo;
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
				      nfs_io_gfp_mask());
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	lo = NFS_I(args->inode)->layout;
	if (lo && pnfs_layout_is_valid(lo)) {
		ff_layout = FF_LAYOUT_FROM_HDR(lo);
		args->num_dev = ff_layout_mirror_prepare_stats(
			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
			NFS4_FF_OP_LAYOUTSTATS);
	} else
		args->num_dev = 0;
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

static int
ff_layout_set_layoutdriver(struct nfs_server *server,
		const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
#endif
	return 0;
}

static const struct pnfs_commit_ops ff_layout_commit_ops = {
	.setup_ds_info		= ff_layout_setup_ds_info,
	.release_ds_info	= ff_layout_release_ds_info,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
};

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.flags			= PNFS_LAYOUTGET_ON_OPEN,
	.max_layoutget_response	= 4096, /* 1 page or so... */
	.set_layoutdriver	= ff_layout_set_layoutdriver,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cancel_io		= ff_layout_cancel_io,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);

module_param(io_maxretrans, ushort, 0644);
MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
			"retries an I/O request before returning an error.");
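/*
 * Usage sketch: io_maxretrans is writable at runtime.  Assuming the
 * module is built as nfs_layout_flexfiles (its name in the mainline
 * build), it can be set at load time with
 * "modprobe nfs_layout_flexfiles io_maxretrans=5" or afterwards via
 * /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans.
 */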