1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/sched/mm.h>
15 
16 #include <linux/sunrpc/metrics.h>
17 
18 #include "flexfilelayout.h"
19 #include "../nfs4session.h"
20 #include "../nfs4idmap.h"
21 #include "../internal.h"
22 #include "../delegation.h"
23 #include "../nfs4trace.h"
24 #include "../iostat.h"
25 #include "../nfs.h"
26 #include "../nfs42.h"
27 
28 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
29 
30 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
31 #define FF_LAYOUTRETURN_MAXERR 20
32 
33 static unsigned short io_maxretrans;
34 
35 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
36 		struct nfs_pgio_header *hdr);
37 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
38 			       struct nfs42_layoutstat_devinfo *devinfo,
39 			       int dev_limit);
40 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
41 			      const struct nfs42_layoutstat_devinfo *devinfo,
42 			      struct nfs4_ff_layout_mirror *mirror);
43 
44 static struct pnfs_layout_hdr *
45 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
46 {
47 	struct nfs4_flexfile_layout *ffl;
48 
49 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
50 	if (ffl) {
51 		INIT_LIST_HEAD(&ffl->error_list);
52 		INIT_LIST_HEAD(&ffl->mirrors);
53 		ffl->last_report_time = ktime_get();
54 		return &ffl->generic_hdr;
55 	} else
56 		return NULL;
57 }
58 
59 static void
60 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
61 {
62 	struct nfs4_ff_layout_ds_err *err, *n;
63 
64 	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
65 				 list) {
66 		list_del(&err->list);
67 		kfree(err);
68 	}
69 	kfree(FF_LAYOUT_FROM_HDR(lo));
70 }
71 
72 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
73 {
74 	__be32 *p;
75 
76 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
77 	if (unlikely(p == NULL))
78 		return -ENOBUFS;
79 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
80 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
81 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
82 		p[0], p[1], p[2], p[3]);
83 	return 0;
84 }
85 
86 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
87 {
88 	__be32 *p;
89 
90 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
91 	if (unlikely(!p))
92 		return -ENOBUFS;
93 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
94 	nfs4_print_deviceid(devid);
95 	return 0;
96 }
97 
98 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
99 {
100 	__be32 *p;
101 
102 	p = xdr_inline_decode(xdr, 4);
103 	if (unlikely(!p))
104 		return -ENOBUFS;
105 	fh->size = be32_to_cpup(p++);
106 	if (fh->size > NFS_MAXFHSIZE) {
107 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
108 		       fh->size);
109 		return -EOVERFLOW;
110 	}
111 	/* fh.data */
112 	p = xdr_inline_decode(xdr, fh->size);
113 	if (unlikely(!p))
114 		return -ENOBUFS;
115 	memcpy(&fh->data, p, fh->size);
116 	dprintk("%s: fh len %d\n", __func__, fh->size);
117 
118 	return 0;
119 }
120 
121 /*
122  * Currently only stringified uids and gids are accepted.
123  * I.e., Kerberos is not supported for the DSes, so no principals.
124  *
125  * That means that one common function will suffice, but when
126  * principals are added, this should be split to accommodate
127  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
128  */
129 static int
130 decode_name(struct xdr_stream *xdr, u32 *id)
131 {
132 	__be32 *p;
133 	int len;
134 
135 	/* opaque_length(4)*/
136 	p = xdr_inline_decode(xdr, 4);
137 	if (unlikely(!p))
138 		return -ENOBUFS;
139 	len = be32_to_cpup(p++);
140 	if (len < 0)
141 		return -EINVAL;
142 
143 	dprintk("%s: len %u\n", __func__, len);
144 
145 	/* opaque body */
146 	p = xdr_inline_decode(xdr, len);
147 	if (unlikely(!p))
148 		return -ENOBUFS;
149 
150 	if (!nfs_map_string_to_numeric((char *)p, len, id))
151 		return -EINVAL;
152 
153 	return 0;
154 }
155 
156 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
157 		const struct nfs4_ff_layout_mirror *m2)
158 {
159 	int i, j;
160 
161 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
162 		return false;
163 	for (i = 0; i < m1->fh_versions_cnt; i++) {
164 		bool found_fh = false;
165 		for (j = 0; j < m2->fh_versions_cnt; j++) {
166 			if (nfs_compare_fh(&m1->fh_versions[i],
167 					&m2->fh_versions[j]) == 0) {
168 				found_fh = true;
169 				break;
170 			}
171 		}
172 		if (!found_fh)
173 			return false;
174 	}
175 	return true;
176 }
177 
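/*
 * Add a freshly decoded mirror to the layout's mirror list, or, if an
 * equivalent entry (same deviceid and filehandle versions) already
 * exists, take a reference on the existing mirror and return it instead.
 */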
178 static struct nfs4_ff_layout_mirror *
179 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
180 		struct nfs4_ff_layout_mirror *mirror)
181 {
182 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
183 	struct nfs4_ff_layout_mirror *pos;
184 	struct inode *inode = lo->plh_inode;
185 
186 	spin_lock(&inode->i_lock);
187 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
188 		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
189 			continue;
190 		if (!ff_mirror_match_fh(mirror, pos))
191 			continue;
192 		if (refcount_inc_not_zero(&pos->ref)) {
193 			spin_unlock(&inode->i_lock);
194 			return pos;
195 		}
196 	}
197 	list_add(&mirror->mirrors, &ff_layout->mirrors);
198 	mirror->layout = lo;
199 	spin_unlock(&inode->i_lock);
200 	return mirror;
201 }
202 
203 static void
204 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
205 {
206 	struct inode *inode;
207 	if (mirror->layout == NULL)
208 		return;
209 	inode = mirror->layout->plh_inode;
210 	spin_lock(&inode->i_lock);
211 	list_del(&mirror->mirrors);
212 	spin_unlock(&inode->i_lock);
213 	mirror->layout = NULL;
214 }
215 
216 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
217 {
218 	struct nfs4_ff_layout_mirror *mirror;
219 
220 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
221 	if (mirror != NULL) {
222 		spin_lock_init(&mirror->lock);
223 		refcount_set(&mirror->ref, 1);
224 		INIT_LIST_HEAD(&mirror->mirrors);
225 	}
226 	return mirror;
227 }
228 
229 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
230 {
231 	const struct cred	*cred;
232 
233 	ff_layout_remove_mirror(mirror);
234 	kfree(mirror->fh_versions);
235 	cred = rcu_access_pointer(mirror->ro_cred);
236 	put_cred(cred);
237 	cred = rcu_access_pointer(mirror->rw_cred);
238 	put_cred(cred);
239 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
240 	kfree(mirror);
241 }
242 
243 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
244 {
245 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
246 		ff_layout_free_mirror(mirror);
247 }
248 
249 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
250 {
251 	int i;
252 
253 	if (fls->mirror_array) {
254 		for (i = 0; i < fls->mirror_array_cnt; i++) {
255 			/* normally mirror_ds is freed in
256 			 * .free_deviceid_node but we still do it here
257 			 * for .alloc_lseg error path */
258 			ff_layout_put_mirror(fls->mirror_array[i]);
259 		}
260 		kfree(fls->mirror_array);
261 		fls->mirror_array = NULL;
262 	}
263 }
264 
265 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
266 {
267 	int ret = 0;
268 
269 	dprintk("--> %s\n", __func__);
270 
271 	/* FIXME: remove this check when layout segment support is added */
272 	if (lgr->range.offset != 0 ||
273 	    lgr->range.length != NFS4_MAX_UINT64) {
274 		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
275 			__func__);
276 		ret = -EINVAL;
277 	}
278 
279 	dprintk("--> %s returns %d\n", __func__, ret);
280 	return ret;
281 }
282 
283 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
284 {
285 	if (fls) {
286 		ff_layout_free_mirror_array(fls);
287 		kfree(fls);
288 	}
289 }
290 
291 static bool
292 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
293 		const struct pnfs_layout_range *l2)
294 {
295 	u64 end1, end2;
296 
297 	if (l1->iomode != l2->iomode)
298 		return l1->iomode != IOMODE_READ;
299 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
300 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
301 	if (end1 < l2->offset)
302 		return false;
303 	if (end2 < l1->offset)
304 		return true;
305 	return l2->offset <= l1->offset;
306 }
307 
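/*
 * Try to merge two overlapping layout segments of the same iomode,
 * extending @new's range to cover @old before @old is freed.
 */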
308 static bool
309 ff_lseg_merge(struct pnfs_layout_segment *new,
310 		struct pnfs_layout_segment *old)
311 {
312 	u64 new_end, old_end;
313 
314 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
315 		return false;
316 	if (new->pls_range.iomode != old->pls_range.iomode)
317 		return false;
318 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
319 			old->pls_range.length);
320 	if (old_end < new->pls_range.offset)
321 		return false;
322 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
323 			new->pls_range.length);
324 	if (new_end < old->pls_range.offset)
325 		return false;
326 
327 	/* Mergeable: copy info from 'old' to 'new' */
328 	if (new_end < old_end)
329 		new_end = old_end;
330 	if (new->pls_range.offset < old->pls_range.offset)
331 		new->pls_range.offset = old->pls_range.offset;
332 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
333 			new_end);
334 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
335 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
336 	return true;
337 }
338 
339 static void
340 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
341 		struct pnfs_layout_segment *lseg,
342 		struct list_head *free_me)
343 {
344 	pnfs_generic_layout_insert_lseg(lo, lseg,
345 			ff_lseg_range_is_after,
346 			ff_lseg_merge,
347 			free_me);
348 }
349 
350 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
351 {
352 	int i, j;
353 
354 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
355 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
356 			if (fls->mirror_array[i]->efficiency <
357 			    fls->mirror_array[j]->efficiency)
358 				swap(fls->mirror_array[i],
359 				     fls->mirror_array[j]);
360 	}
361 }
362 
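/*
 * Decode the opaque flexfile layout body returned by LAYOUTGET: the stripe
 * unit and mirror count, then for each mirror a deviceid, efficiency value,
 * stateid, filehandle array and synthetic uid/gid credentials. Decoded
 * mirrors are deduplicated against the layout's existing mirror list.
 */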
363 static struct pnfs_layout_segment *
364 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
365 		     struct nfs4_layoutget_res *lgr,
366 		     gfp_t gfp_flags)
367 {
368 	struct pnfs_layout_segment *ret;
369 	struct nfs4_ff_layout_segment *fls = NULL;
370 	struct xdr_stream stream;
371 	struct xdr_buf buf;
372 	struct page *scratch;
373 	u64 stripe_unit;
374 	u32 mirror_array_cnt;
375 	__be32 *p;
376 	int i, rc;
377 
378 	dprintk("--> %s\n", __func__);
379 	scratch = alloc_page(gfp_flags);
380 	if (!scratch)
381 		return ERR_PTR(-ENOMEM);
382 
383 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
384 			      lgr->layoutp->len);
385 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
386 
387 	/* stripe unit and mirror_array_cnt */
388 	rc = -EIO;
389 	p = xdr_inline_decode(&stream, 8 + 4);
390 	if (!p)
391 		goto out_err_free;
392 
393 	p = xdr_decode_hyper(p, &stripe_unit);
394 	mirror_array_cnt = be32_to_cpup(p++);
395 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
396 		stripe_unit, mirror_array_cnt);
397 
398 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
399 	    mirror_array_cnt == 0)
400 		goto out_err_free;
401 
402 	rc = -ENOMEM;
403 	fls = kzalloc(sizeof(*fls), gfp_flags);
404 	if (!fls)
405 		goto out_err_free;
406 
407 	fls->mirror_array_cnt = mirror_array_cnt;
408 	fls->stripe_unit = stripe_unit;
409 	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
410 				    sizeof(fls->mirror_array[0]), gfp_flags);
411 	if (fls->mirror_array == NULL)
412 		goto out_err_free;
413 
414 	for (i = 0; i < fls->mirror_array_cnt; i++) {
415 		struct nfs4_ff_layout_mirror *mirror;
416 		struct cred *kcred;
417 		const struct cred __rcu *cred;
418 		kuid_t uid;
419 		kgid_t gid;
420 		u32 ds_count, fh_count, id;
421 		int j;
422 
423 		rc = -EIO;
424 		p = xdr_inline_decode(&stream, 4);
425 		if (!p)
426 			goto out_err_free;
427 		ds_count = be32_to_cpup(p);
428 
429 		/* FIXME: allow for striping? */
430 		if (ds_count != 1)
431 			goto out_err_free;
432 
433 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
434 		if (fls->mirror_array[i] == NULL) {
435 			rc = -ENOMEM;
436 			goto out_err_free;
437 		}
438 
439 		fls->mirror_array[i]->ds_count = ds_count;
440 
441 		/* deviceid */
442 		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
443 		if (rc)
444 			goto out_err_free;
445 
446 		/* efficiency */
447 		rc = -EIO;
448 		p = xdr_inline_decode(&stream, 4);
449 		if (!p)
450 			goto out_err_free;
451 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
452 
453 		/* stateid */
454 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
455 		if (rc)
456 			goto out_err_free;
457 
458 		/* fh */
459 		rc = -EIO;
460 		p = xdr_inline_decode(&stream, 4);
461 		if (!p)
462 			goto out_err_free;
463 		fh_count = be32_to_cpup(p);
464 
465 		fls->mirror_array[i]->fh_versions =
466 			kcalloc(fh_count, sizeof(struct nfs_fh),
467 				gfp_flags);
468 		if (fls->mirror_array[i]->fh_versions == NULL) {
469 			rc = -ENOMEM;
470 			goto out_err_free;
471 		}
472 
473 		for (j = 0; j < fh_count; j++) {
474 			rc = decode_nfs_fh(&stream,
475 					   &fls->mirror_array[i]->fh_versions[j]);
476 			if (rc)
477 				goto out_err_free;
478 		}
479 
480 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
481 
482 		/* user */
483 		rc = decode_name(&stream, &id);
484 		if (rc)
485 			goto out_err_free;
486 
487 		uid = make_kuid(&init_user_ns, id);
488 
489 		/* group */
490 		rc = decode_name(&stream, &id);
491 		if (rc)
492 			goto out_err_free;
493 
494 		gid = make_kgid(&init_user_ns, id);
495 
496 		if (gfp_flags & __GFP_FS)
497 			kcred = prepare_kernel_cred(NULL);
498 		else {
499 			unsigned int nofs_flags = memalloc_nofs_save();
500 			kcred = prepare_kernel_cred(NULL);
501 			memalloc_nofs_restore(nofs_flags);
502 		}
503 		rc = -ENOMEM;
504 		if (!kcred)
505 			goto out_err_free;
506 		kcred->fsuid = uid;
507 		kcred->fsgid = gid;
508 		cred = RCU_INITIALIZER(kcred);
509 
510 		if (lgr->range.iomode == IOMODE_READ)
511 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
512 		else
513 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
514 
515 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
516 		if (mirror != fls->mirror_array[i]) {
517 			/* swap cred ptrs so free_mirror will clean up old */
518 			if (lgr->range.iomode == IOMODE_READ) {
519 				cred = xchg(&mirror->ro_cred, cred);
520 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
521 			} else {
522 				cred = xchg(&mirror->rw_cred, cred);
523 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
524 			}
525 			ff_layout_free_mirror(fls->mirror_array[i]);
526 			fls->mirror_array[i] = mirror;
527 		}
528 
529 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
530 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
531 			from_kuid(&init_user_ns, uid),
532 			from_kgid(&init_user_ns, gid));
533 	}
534 
535 	p = xdr_inline_decode(&stream, 4);
536 	if (!p)
537 		goto out_sort_mirrors;
538 	fls->flags = be32_to_cpup(p);
539 
540 	p = xdr_inline_decode(&stream, 4);
541 	if (!p)
542 		goto out_sort_mirrors;
543 	for (i = 0; i < fls->mirror_array_cnt; i++)
544 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
545 
546 out_sort_mirrors:
547 	ff_layout_sort_mirrors(fls);
548 	rc = ff_layout_check_layout(lgr);
549 	if (rc)
550 		goto out_err_free;
551 	ret = &fls->generic_hdr;
552 	dprintk("<-- %s (success)\n", __func__);
553 out_free_page:
554 	__free_page(scratch);
555 	return ret;
556 out_err_free:
557 	_ff_layout_free_lseg(fls);
558 	ret = ERR_PTR(rc);
559 	dprintk("<-- %s (%d)\n", __func__, rc);
560 	goto out_free_page;
561 }
562 
563 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
564 {
565 	struct pnfs_layout_segment *lseg;
566 
567 	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
568 		if (lseg->pls_range.iomode == IOMODE_RW)
569 			return true;
570 
571 	return false;
572 }
573 
574 static void
575 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
576 {
577 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
578 
579 	dprintk("--> %s\n", __func__);
580 
581 	if (lseg->pls_range.iomode == IOMODE_RW) {
582 		struct nfs4_flexfile_layout *ffl;
583 		struct inode *inode;
584 
585 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
586 		inode = ffl->generic_hdr.plh_inode;
587 		spin_lock(&inode->i_lock);
588 		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
589 			ffl->commit_info.nbuckets = 0;
590 			kfree(ffl->commit_info.buckets);
591 			ffl->commit_info.buckets = NULL;
592 		}
593 		spin_unlock(&inode->i_lock);
594 	}
595 	_ff_layout_free_lseg(fls);
596 }
597 
598 /* Return 1 until we have multiple lsegs support */
599 static int
600 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
601 {
602 	return 1;
603 }
604 
605 static void
606 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
607 {
608 	/* first IO request? */
609 	if (atomic_inc_return(&timer->n_ops) == 1) {
610 		timer->start_time = now;
611 	}
612 }
613 
614 static ktime_t
615 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
616 {
617 	ktime_t start;
618 
619 	if (atomic_dec_return(&timer->n_ops) < 0)
620 		WARN_ON_ONCE(1);
621 
622 	start = timer->start_time;
623 	timer->start_time = now;
624 	return ktime_sub(now, start);
625 }
626 
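/*
 * Record the start of an I/O for layoutstats purposes and decide whether it
 * is time to send a LAYOUTSTATS report, based on the mirror's report
 * interval, the layoutstats_timer module parameter, or the default interval.
 */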
627 static bool
628 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
629 			    struct nfs4_ff_layoutstat *layoutstat,
630 			    ktime_t now)
631 {
632 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
633 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
634 
635 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
636 	if (!mirror->start_time)
637 		mirror->start_time = now;
638 	if (mirror->report_interval != 0)
639 		report_interval = (s64)mirror->report_interval * 1000LL;
640 	else if (layoutstats_timer != 0)
641 		report_interval = (s64)layoutstats_timer * 1000LL;
642 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
643 			report_interval) {
644 		ffl->last_report_time = now;
645 		return true;
646 	}
647 
648 	return false;
649 }
650 
651 static void
652 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
653 		__u64 requested)
654 {
655 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
656 
657 	iostat->ops_requested++;
658 	iostat->bytes_requested += requested;
659 }
660 
661 static void
662 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
663 		__u64 requested,
664 		__u64 completed,
665 		ktime_t time_completed,
666 		ktime_t time_started)
667 {
668 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
669 	ktime_t completion_time = ktime_sub(time_completed, time_started);
670 	ktime_t timer;
671 
672 	iostat->ops_completed++;
673 	iostat->bytes_completed += completed;
674 	iostat->bytes_not_delivered += requested - completed;
675 
676 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
677 	iostat->total_busy_time =
678 			ktime_add(iostat->total_busy_time, timer);
679 	iostat->aggregate_completion_time =
680 			ktime_add(iostat->aggregate_completion_time,
681 					completion_time);
682 }
683 
684 static void
685 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
686 		struct nfs4_ff_layout_mirror *mirror,
687 		__u64 requested, ktime_t now)
688 {
689 	bool report;
690 
691 	spin_lock(&mirror->lock);
692 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
693 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
694 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
695 	spin_unlock(&mirror->lock);
696 
697 	if (report)
698 		pnfs_report_layoutstat(inode, GFP_KERNEL);
699 }
700 
701 static void
702 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
703 		struct nfs4_ff_layout_mirror *mirror,
704 		__u64 requested,
705 		__u64 completed)
706 {
707 	spin_lock(&mirror->lock);
708 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
709 			requested, completed,
710 			ktime_get(), task->tk_start);
711 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
712 	spin_unlock(&mirror->lock);
713 }
714 
715 static void
716 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
717 		struct nfs4_ff_layout_mirror *mirror,
718 		__u64 requested, ktime_t now)
719 {
720 	bool report;
721 
722 	spin_lock(&mirror->lock);
723 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
724 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
725 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
726 	spin_unlock(&mirror->lock);
727 
728 	if (report)
729 		pnfs_report_layoutstat(inode, GFP_NOIO);
730 }
731 
732 static void
733 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
734 		struct nfs4_ff_layout_mirror *mirror,
735 		__u64 requested,
736 		__u64 completed,
737 		enum nfs3_stable_how committed)
738 {
739 	if (committed == NFS_UNSTABLE)
740 		requested = completed = 0;
741 
742 	spin_lock(&mirror->lock);
743 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
744 			requested, completed, ktime_get(), task->tk_start);
745 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
746 	spin_unlock(&mirror->lock);
747 }
748 
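/*
 * Lazily allocate one commit bucket per mirror for this layout. The bucket
 * array is installed under the inode lock and is only set up once, since a
 * single RW lseg per file is assumed (see the comment in the body below).
 */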
749 static int
750 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
751 			    struct nfs_commit_info *cinfo,
752 			    gfp_t gfp_flags)
753 {
754 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
755 	struct pnfs_commit_bucket *buckets;
756 	int size;
757 
758 	if (cinfo->ds->nbuckets != 0) {
759 		/* This assumes there is only one RW lseg per file.
760 		 * To support multiple lseg per file, we need to
761 		 * change struct pnfs_commit_bucket to allow dynamic
762 		 * increasing nbuckets.
763 		 */
764 		return 0;
765 	}
766 
767 	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
768 
769 	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
770 			  gfp_flags);
771 	if (!buckets)
772 		return -ENOMEM;
773 	else {
774 		int i;
775 
776 		spin_lock(&cinfo->inode->i_lock);
777 		if (cinfo->ds->nbuckets != 0)
778 			kfree(buckets);
779 		else {
780 			cinfo->ds->buckets = buckets;
781 			cinfo->ds->nbuckets = size;
782 			for (i = 0; i < size; i++) {
783 				INIT_LIST_HEAD(&buckets[i].written);
784 				INIT_LIST_HEAD(&buckets[i].committing);
785 				/* mark direct verifier as unset */
786 				buckets[i].direct_verf.committed =
787 					NFS_INVALID_STABLE_HOW;
788 			}
789 		}
790 		spin_unlock(&cinfo->inode->i_lock);
791 		return 0;
792 	}
793 }
794 
795 static void
796 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
797 {
798 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
799 
800 	if (devid)
801 		nfs4_mark_deviceid_unavailable(devid);
802 }
803 
804 static void
805 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
806 {
807 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
808 
809 	if (devid)
810 		nfs4_mark_deviceid_available(devid);
811 }
812 
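/*
 * Walk the mirrors (already sorted by efficiency) starting at @start_idx
 * and return the first data server that can be connected; if @check_device
 * is set, mirrors whose deviceid is marked unavailable are skipped.
 */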
813 static struct nfs4_pnfs_ds *
814 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
815 			     int start_idx, int *best_idx,
816 			     bool check_device)
817 {
818 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
819 	struct nfs4_ff_layout_mirror *mirror;
820 	struct nfs4_pnfs_ds *ds;
821 	bool fail_return = false;
822 	int idx;
823 
824 	/* mirrors are initially sorted by efficiency */
825 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
826 		if (idx+1 == fls->mirror_array_cnt)
827 			fail_return = !check_device;
828 
829 		mirror = FF_LAYOUT_COMP(lseg, idx);
830 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
831 		if (!ds)
832 			continue;
833 
834 		if (check_device &&
835 		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
836 			continue;
837 
838 		*best_idx = idx;
839 		return ds;
840 	}
841 
842 	return NULL;
843 }
844 
845 static struct nfs4_pnfs_ds *
846 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
847 				 int start_idx, int *best_idx)
848 {
849 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
850 }
851 
852 static struct nfs4_pnfs_ds *
853 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
854 				   int start_idx, int *best_idx)
855 {
856 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
857 }
858 
859 static struct nfs4_pnfs_ds *
860 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
861 				  int start_idx, int *best_idx)
862 {
863 	struct nfs4_pnfs_ds *ds;
864 
865 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
866 	if (ds)
867 		return ds;
868 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
869 }
870 
871 static void
872 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
873 		      struct nfs_page *req,
874 		      bool strict_iomode)
875 {
876 	pnfs_put_lseg(pgio->pg_lseg);
877 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
878 					   nfs_req_openctx(req),
879 					   0,
880 					   NFS4_MAX_UINT64,
881 					   IOMODE_READ,
882 					   strict_iomode,
883 					   GFP_KERNEL);
884 	if (IS_ERR(pgio->pg_lseg)) {
885 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
886 		pgio->pg_lseg = NULL;
887 	}
888 }
889 
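/*
 * Set up the pageio descriptor for a read: obtain a whole-file layout
 * segment, pick the best data server among the mirrors, and fall back to
 * the MDS (or retry after a delay) if no usable DS is found.
 */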
890 static void
891 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
892 			struct nfs_page *req)
893 {
894 	struct nfs_pgio_mirror *pgm;
895 	struct nfs4_ff_layout_mirror *mirror;
896 	struct nfs4_pnfs_ds *ds;
897 	int ds_idx;
898 
899 retry:
900 	pnfs_generic_pg_check_layout(pgio);
901 	/* Use full layout for now */
902 	if (!pgio->pg_lseg) {
903 		ff_layout_pg_get_read(pgio, req, false);
904 		if (!pgio->pg_lseg)
905 			goto out_nolseg;
906 	}
907 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
908 		ff_layout_pg_get_read(pgio, req, true);
909 		if (!pgio->pg_lseg)
910 			goto out_nolseg;
911 	}
912 
913 	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
914 	if (!ds) {
915 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
916 			goto out_mds;
917 		pnfs_put_lseg(pgio->pg_lseg);
918 		pgio->pg_lseg = NULL;
919 		/* Sleep for 1 second before retrying */
920 		ssleep(1);
921 		goto retry;
922 	}
923 
924 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
925 
926 	pgio->pg_mirror_idx = ds_idx;
927 
928 	/* read always uses only one mirror - idx 0 for pgio layer */
929 	pgm = &pgio->pg_mirrors[0];
930 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
931 
932 	if (NFS_SERVER(pgio->pg_inode)->flags &
933 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
934 		pgio->pg_maxretrans = io_maxretrans;
935 	return;
936 out_nolseg:
937 	if (pgio->pg_error < 0)
938 		return;
939 out_mds:
940 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
941 			0, NFS4_MAX_UINT64, IOMODE_READ,
942 			NFS_I(pgio->pg_inode)->layout,
943 			pgio->pg_lseg);
944 	pnfs_put_lseg(pgio->pg_lseg);
945 	pgio->pg_lseg = NULL;
946 	pgio->pg_maxretrans = 0;
947 	nfs_pageio_reset_read_mds(pgio);
948 }
949 
950 static void
951 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
952 			struct nfs_page *req)
953 {
954 	struct nfs4_ff_layout_mirror *mirror;
955 	struct nfs_pgio_mirror *pgm;
956 	struct nfs_commit_info cinfo;
957 	struct nfs4_pnfs_ds *ds;
958 	int i;
959 	int status;
960 
961 retry:
962 	pnfs_generic_pg_check_layout(pgio);
963 	if (!pgio->pg_lseg) {
964 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
965 						   nfs_req_openctx(req),
966 						   0,
967 						   NFS4_MAX_UINT64,
968 						   IOMODE_RW,
969 						   false,
970 						   GFP_NOFS);
971 		if (IS_ERR(pgio->pg_lseg)) {
972 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
973 			pgio->pg_lseg = NULL;
974 			return;
975 		}
976 	}
977 	/* If no lseg, fall back to write through mds */
978 	if (pgio->pg_lseg == NULL)
979 		goto out_mds;
980 
981 	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
982 	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
983 	if (status < 0)
984 		goto out_mds;
985 
986 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
987 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
988 		goto out_eagain;
989 
990 	for (i = 0; i < pgio->pg_mirror_count; i++) {
991 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
992 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
993 		if (!ds) {
994 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
995 				goto out_mds;
996 			pnfs_put_lseg(pgio->pg_lseg);
997 			pgio->pg_lseg = NULL;
998 			/* Sleep for 1 second before retrying */
999 			ssleep(1);
1000 			goto retry;
1001 		}
1002 		pgm = &pgio->pg_mirrors[i];
1003 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
1004 	}
1005 
1006 	if (NFS_SERVER(pgio->pg_inode)->flags &
1007 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1008 		pgio->pg_maxretrans = io_maxretrans;
1009 	return;
1010 out_eagain:
1011 	pnfs_generic_pg_cleanup(pgio);
1012 	pgio->pg_error = -EAGAIN;
1013 	return;
1014 out_mds:
1015 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
1016 			0, NFS4_MAX_UINT64, IOMODE_RW,
1017 			NFS_I(pgio->pg_inode)->layout,
1018 			pgio->pg_lseg);
1019 	pnfs_put_lseg(pgio->pg_lseg);
1020 	pgio->pg_lseg = NULL;
1021 	pgio->pg_maxretrans = 0;
1022 	nfs_pageio_reset_write_mds(pgio);
1023 	pgio->pg_error = -EAGAIN;
1024 }
1025 
1026 static unsigned int
1027 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1028 				    struct nfs_page *req)
1029 {
1030 	if (!pgio->pg_lseg) {
1031 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1032 						   nfs_req_openctx(req),
1033 						   0,
1034 						   NFS4_MAX_UINT64,
1035 						   IOMODE_RW,
1036 						   false,
1037 						   GFP_NOFS);
1038 		if (IS_ERR(pgio->pg_lseg)) {
1039 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1040 			pgio->pg_lseg = NULL;
1041 			goto out;
1042 		}
1043 	}
1044 	if (pgio->pg_lseg)
1045 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1046 
1047 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
1048 			0, NFS4_MAX_UINT64, IOMODE_RW,
1049 			NFS_I(pgio->pg_inode)->layout,
1050 			pgio->pg_lseg);
1051 	/* no lseg means that pnfs is not in use, so no mirroring here */
1052 	nfs_pageio_reset_write_mds(pgio);
1053 out:
1054 	return 1;
1055 }
1056 
1057 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1058 	.pg_init = ff_layout_pg_init_read,
1059 	.pg_test = pnfs_generic_pg_test,
1060 	.pg_doio = pnfs_generic_pg_readpages,
1061 	.pg_cleanup = pnfs_generic_pg_cleanup,
1062 };
1063 
1064 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1065 	.pg_init = ff_layout_pg_init_write,
1066 	.pg_test = pnfs_generic_pg_test,
1067 	.pg_doio = pnfs_generic_pg_writepages,
1068 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1069 	.pg_cleanup = pnfs_generic_pg_cleanup,
1070 };
1071 
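/*
 * Resend a failed write either through pNFS (rescheduling the I/O) or
 * through the MDS, after committing any outstanding layout changes.
 */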
1072 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1073 {
1074 	struct rpc_task *task = &hdr->task;
1075 
1076 	pnfs_layoutcommit_inode(hdr->inode, false);
1077 
1078 	if (retry_pnfs) {
1079 		dprintk("%s Reset task %5u for i/o through pNFS "
1080 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1081 			hdr->task.tk_pid,
1082 			hdr->inode->i_sb->s_id,
1083 			(unsigned long long)NFS_FILEID(hdr->inode),
1084 			hdr->args.count,
1085 			(unsigned long long)hdr->args.offset);
1086 
1087 		hdr->completion_ops->reschedule_io(hdr);
1088 		return;
1089 	}
1090 
1091 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1092 		dprintk("%s Reset task %5u for i/o through MDS "
1093 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1094 			hdr->task.tk_pid,
1095 			hdr->inode->i_sb->s_id,
1096 			(unsigned long long)NFS_FILEID(hdr->inode),
1097 			hdr->args.count,
1098 			(unsigned long long)hdr->args.offset);
1099 
1100 		trace_pnfs_mds_fallback_write_done(hdr->inode,
1101 				hdr->args.offset, hdr->args.count,
1102 				IOMODE_RW, NFS_I(hdr->inode)->layout,
1103 				hdr->lseg);
1104 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1105 	}
1106 }
1107 
1108 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1109 {
1110 	struct rpc_task *task = &hdr->task;
1111 
1112 	pnfs_layoutcommit_inode(hdr->inode, false);
1113 
1114 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1115 		dprintk("%s Reset task %5u for i/o through MDS "
1116 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1117 			hdr->task.tk_pid,
1118 			hdr->inode->i_sb->s_id,
1119 			(unsigned long long)NFS_FILEID(hdr->inode),
1120 			hdr->args.count,
1121 			(unsigned long long)hdr->args.offset);
1122 
1123 		trace_pnfs_mds_fallback_read_done(hdr->inode,
1124 				hdr->args.offset, hdr->args.count,
1125 				IOMODE_READ, NFS_I(hdr->inode)->layout,
1126 				hdr->lseg);
1127 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1128 	}
1129 }
1130 
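/*
 * Classify an NFSv4 error from a DS: session errors trigger session
 * recovery, layout-invalidating errors destroy the layout so new I/O gets a
 * fresh one, and connection errors drop the deviceid. The return value
 * tells the caller whether to resend through pNFS, resend through the MDS,
 * or retry the RPC (-EAGAIN).
 */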
1131 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1132 					   struct nfs4_state *state,
1133 					   struct nfs_client *clp,
1134 					   struct pnfs_layout_segment *lseg,
1135 					   int idx)
1136 {
1137 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1138 	struct inode *inode = lo->plh_inode;
1139 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1140 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1141 
1142 	switch (task->tk_status) {
1143 	case -NFS4ERR_BADSESSION:
1144 	case -NFS4ERR_BADSLOT:
1145 	case -NFS4ERR_BAD_HIGH_SLOT:
1146 	case -NFS4ERR_DEADSESSION:
1147 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1148 	case -NFS4ERR_SEQ_FALSE_RETRY:
1149 	case -NFS4ERR_SEQ_MISORDERED:
1150 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1151 			"flags 0x%x\n", __func__, task->tk_status,
1152 			clp->cl_exchange_flags);
1153 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1154 		break;
1155 	case -NFS4ERR_DELAY:
1156 	case -NFS4ERR_GRACE:
1157 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1158 		break;
1159 	case -NFS4ERR_RETRY_UNCACHED_REP:
1160 		break;
1161 	/* Invalidate Layout errors */
1162 	case -NFS4ERR_PNFS_NO_LAYOUT:
1163 	case -ESTALE:           /* mapped NFS4ERR_STALE */
1164 	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1165 	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1166 	case -NFS4ERR_FHEXPIRED:
1167 	case -NFS4ERR_WRONG_TYPE:
1168 		dprintk("%s Invalid layout error %d\n", __func__,
1169 			task->tk_status);
1170 		/*
1171 		 * Destroy layout so new i/o will get a new layout.
1172 		 * Layout will not be destroyed until all current lseg
1173 		 * references are put. Mark layout as invalid to resend failed
1174 		 * i/o and all i/o waiting on the slot table to the MDS until
1175 		 * layout is destroyed and a new valid layout is obtained.
1176 		 */
1177 		pnfs_destroy_layout(NFS_I(inode));
1178 		rpc_wake_up(&tbl->slot_tbl_waitq);
1179 		goto reset;
1180 	/* RPC connection errors */
1181 	case -ECONNREFUSED:
1182 	case -EHOSTDOWN:
1183 	case -EHOSTUNREACH:
1184 	case -ENETUNREACH:
1185 	case -EIO:
1186 	case -ETIMEDOUT:
1187 	case -EPIPE:
1188 		dprintk("%s DS connection error %d\n", __func__,
1189 			task->tk_status);
1190 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1191 				&devid->deviceid);
1192 		rpc_wake_up(&tbl->slot_tbl_waitq);
1193 		/* fall through */
1194 	default:
1195 		if (ff_layout_avoid_mds_available_ds(lseg))
1196 			return -NFS4ERR_RESET_TO_PNFS;
1197 reset:
1198 		dprintk("%s Retry through MDS. Error %d\n", __func__,
1199 			task->tk_status);
1200 		return -NFS4ERR_RESET_TO_MDS;
1201 	}
1202 	task->tk_status = 0;
1203 	return -EAGAIN;
1204 }
1205 
1206 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1207 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1208 					   struct pnfs_layout_segment *lseg,
1209 					   int idx)
1210 {
1211 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1212 
1213 	switch (task->tk_status) {
1214 	/* File access problems. Don't mark the device as unavailable */
1215 	case -EACCES:
1216 	case -ESTALE:
1217 	case -EISDIR:
1218 	case -EBADHANDLE:
1219 	case -ELOOP:
1220 	case -ENOSPC:
1221 		break;
1222 	case -EJUKEBOX:
1223 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1224 		goto out_retry;
1225 	default:
1226 		dprintk("%s DS connection error %d\n", __func__,
1227 			task->tk_status);
1228 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1229 				&devid->deviceid);
1230 	}
1231 	/* FIXME: Need to prevent infinite looping here. */
1232 	return -NFS4ERR_RESET_TO_PNFS;
1233 out_retry:
1234 	task->tk_status = 0;
1235 	rpc_restart_call_prepare(task);
1236 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1237 	return -EAGAIN;
1238 }
1239 
1240 static int ff_layout_async_handle_error(struct rpc_task *task,
1241 					struct nfs4_state *state,
1242 					struct nfs_client *clp,
1243 					struct pnfs_layout_segment *lseg,
1244 					int idx)
1245 {
1246 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1247 
1248 	if (task->tk_status >= 0) {
1249 		ff_layout_mark_ds_reachable(lseg, idx);
1250 		return 0;
1251 	}
1252 
1253 	/* Handle the case of an invalid layout segment */
1254 	if (!pnfs_is_valid_lseg(lseg))
1255 		return -NFS4ERR_RESET_TO_PNFS;
1256 
1257 	switch (vers) {
1258 	case 3:
1259 		return ff_layout_async_handle_error_v3(task, lseg, idx);
1260 	case 4:
1261 		return ff_layout_async_handle_error_v4(task, state, clp,
1262 						       lseg, idx);
1263 	default:
1264 		/* should never happen */
1265 		WARN_ON_ONCE(1);
1266 		return 0;
1267 	}
1268 }
1269 
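/*
 * Map local errnos to NFS4ERR codes, record the error against the mirror,
 * mark the DS unreachable on NXIO, and mark the layout for return.
 */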
1270 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1271 					int idx, u64 offset, u64 length,
1272 					u32 status, int opnum, int error)
1273 {
1274 	struct nfs4_ff_layout_mirror *mirror;
1275 	int err;
1276 
1277 	if (status == 0) {
1278 		switch (error) {
1279 		case -ETIMEDOUT:
1280 		case -EPFNOSUPPORT:
1281 		case -EPROTONOSUPPORT:
1282 		case -EOPNOTSUPP:
1283 		case -EINVAL:
1284 		case -ECONNREFUSED:
1285 		case -ECONNRESET:
1286 		case -EHOSTDOWN:
1287 		case -EHOSTUNREACH:
1288 		case -ENETUNREACH:
1289 		case -EADDRINUSE:
1290 		case -ENOBUFS:
1291 		case -EPIPE:
1292 		case -EPERM:
1293 			status = NFS4ERR_NXIO;
1294 			break;
1295 		case -EACCES:
1296 			status = NFS4ERR_ACCESS;
1297 			break;
1298 		default:
1299 			return;
1300 		}
1301 	}
1302 
1303 	switch (status) {
1304 	case NFS4ERR_DELAY:
1305 	case NFS4ERR_GRACE:
1306 		return;
1307 	default:
1308 		break;
1309 	}
1310 
1311 	mirror = FF_LAYOUT_COMP(lseg, idx);
1312 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1313 				       mirror, offset, length, status, opnum,
1314 				       GFP_NOIO);
1315 	if (status == NFS4ERR_NXIO)
1316 		ff_layout_mark_ds_unreachable(lseg, idx);
1317 	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
1318 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1319 }
1320 
1321 /* NFS_PROTO call done callback routines */
1322 static int ff_layout_read_done_cb(struct rpc_task *task,
1323 				struct nfs_pgio_header *hdr)
1324 {
1325 	int new_idx = hdr->pgio_mirror_idx;
1326 	int err;
1327 
1328 	trace_nfs4_pnfs_read(hdr, task->tk_status);
1329 	if (task->tk_status < 0)
1330 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1331 					    hdr->args.offset, hdr->args.count,
1332 					    hdr->res.op_status, OP_READ,
1333 					    task->tk_status);
1334 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1335 					   hdr->ds_clp, hdr->lseg,
1336 					   hdr->pgio_mirror_idx);
1337 
1338 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1339 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1340 	switch (err) {
1341 	case -NFS4ERR_RESET_TO_PNFS:
1342 		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1343 					hdr->pgio_mirror_idx + 1,
1344 					&new_idx))
1345 			goto out_layouterror;
1346 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1347 		return task->tk_status;
1348 	case -NFS4ERR_RESET_TO_MDS:
1349 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1350 		return task->tk_status;
1351 	case -EAGAIN:
1352 		goto out_eagain;
1353 	}
1354 
1355 	return 0;
1356 out_layouterror:
1357 	ff_layout_read_record_layoutstats_done(task, hdr);
1358 	ff_layout_send_layouterror(hdr->lseg);
1359 	hdr->pgio_mirror_idx = new_idx;
1360 out_eagain:
1361 	rpc_restart_call_prepare(task);
1362 	return -EAGAIN;
1363 }
1364 
1365 static bool
1366 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1367 {
1368 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1369 }
1370 
1371 /*
1372  * We reference the rpc_cred of the first WRITE that triggers the need for
1373  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1374  * rfc5661 is not clear about which credential should be used.
1375  *
1376  * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
1377  * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1378  * we always send layoutcommit after DS writes.
1379  */
1380 static void
1381 ff_layout_set_layoutcommit(struct inode *inode,
1382 		struct pnfs_layout_segment *lseg,
1383 		loff_t end_offset)
1384 {
1385 	if (!ff_layout_need_layoutcommit(lseg))
1386 		return;
1387 
1388 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1389 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1390 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1391 }
1392 
1393 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1394 		struct nfs_pgio_header *hdr)
1395 {
1396 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1397 		return;
1398 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1399 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1400 			hdr->args.count,
1401 			task->tk_start);
1402 }
1403 
1404 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1405 		struct nfs_pgio_header *hdr)
1406 {
1407 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1408 		return;
1409 	nfs4_ff_layout_stat_io_end_read(task,
1410 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1411 			hdr->args.count,
1412 			hdr->res.count);
1413 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1414 }
1415 
1416 static int ff_layout_read_prepare_common(struct rpc_task *task,
1417 					 struct nfs_pgio_header *hdr)
1418 {
1419 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1420 		rpc_exit(task, -EIO);
1421 		return -EIO;
1422 	}
1423 
1424 	ff_layout_read_record_layoutstats_start(task, hdr);
1425 	return 0;
1426 }
1427 
1428 /*
1429  * Call ops for the async read/write cases
1430  * In the case of dense layouts, the offset needs to be reset to its
1431  * original value.
1432  */
1433 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1434 {
1435 	struct nfs_pgio_header *hdr = data;
1436 
1437 	if (ff_layout_read_prepare_common(task, hdr))
1438 		return;
1439 
1440 	rpc_call_start(task);
1441 }
1442 
1443 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1444 {
1445 	struct nfs_pgio_header *hdr = data;
1446 
1447 	if (nfs4_setup_sequence(hdr->ds_clp,
1448 				&hdr->args.seq_args,
1449 				&hdr->res.seq_res,
1450 				task))
1451 		return;
1452 
1453 	ff_layout_read_prepare_common(task, hdr);
1454 }
1455 
1456 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1457 {
1458 	struct nfs_pgio_header *hdr = data;
1459 
1460 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1461 
1462 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1463 	    task->tk_status == 0) {
1464 		nfs4_sequence_done(task, &hdr->res.seq_res);
1465 		return;
1466 	}
1467 
1468 	/* Note this may cause RPC to be resent */
1469 	hdr->mds_ops->rpc_call_done(task, hdr);
1470 }
1471 
1472 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1473 {
1474 	struct nfs_pgio_header *hdr = data;
1475 
1476 	ff_layout_read_record_layoutstats_done(task, hdr);
1477 	rpc_count_iostats_metrics(task,
1478 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1479 }
1480 
1481 static void ff_layout_read_release(void *data)
1482 {
1483 	struct nfs_pgio_header *hdr = data;
1484 
1485 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1486 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1487 		ff_layout_send_layouterror(hdr->lseg);
1488 		pnfs_read_resend_pnfs(hdr);
1489 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1490 		ff_layout_reset_read(hdr);
1491 	pnfs_generic_rw_release(data);
1492 }
1493 
1494 
1495 static int ff_layout_write_done_cb(struct rpc_task *task,
1496 				struct nfs_pgio_header *hdr)
1497 {
1498 	loff_t end_offs = 0;
1499 	int err;
1500 
1501 	trace_nfs4_pnfs_write(hdr, task->tk_status);
1502 	if (task->tk_status < 0)
1503 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1504 					    hdr->args.offset, hdr->args.count,
1505 					    hdr->res.op_status, OP_WRITE,
1506 					    task->tk_status);
1507 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1508 					   hdr->ds_clp, hdr->lseg,
1509 					   hdr->pgio_mirror_idx);
1510 
1511 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1512 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1513 	switch (err) {
1514 	case -NFS4ERR_RESET_TO_PNFS:
1515 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1516 		return task->tk_status;
1517 	case -NFS4ERR_RESET_TO_MDS:
1518 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1519 		return task->tk_status;
1520 	case -EAGAIN:
1521 		return -EAGAIN;
1522 	}
1523 
1524 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1525 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1526 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1527 
1528 	/* Note: if the write is unstable, don't set end_offs until commit */
1529 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1530 
1531 	/* zero out fattr since we don't care about DS attrs at all */
1532 	hdr->fattr.valid = 0;
1533 	if (task->tk_status >= 0)
1534 		nfs_writeback_update_inode(hdr);
1535 
1536 	return 0;
1537 }
1538 
1539 static int ff_layout_commit_done_cb(struct rpc_task *task,
1540 				     struct nfs_commit_data *data)
1541 {
1542 	int err;
1543 
1544 	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1545 	if (task->tk_status < 0)
1546 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1547 					    data->args.offset, data->args.count,
1548 					    data->res.op_status, OP_COMMIT,
1549 					    task->tk_status);
1550 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1551 					   data->lseg, data->ds_commit_index);
1552 
1553 	switch (err) {
1554 	case -NFS4ERR_RESET_TO_PNFS:
1555 		pnfs_generic_prepare_to_resend_writes(data);
1556 		return -EAGAIN;
1557 	case -NFS4ERR_RESET_TO_MDS:
1558 		pnfs_generic_prepare_to_resend_writes(data);
1559 		return -EAGAIN;
1560 	case -EAGAIN:
1561 		rpc_restart_call_prepare(task);
1562 		return -EAGAIN;
1563 	}
1564 
1565 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1566 
1567 	return 0;
1568 }
1569 
1570 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1571 		struct nfs_pgio_header *hdr)
1572 {
1573 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1574 		return;
1575 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1576 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1577 			hdr->args.count,
1578 			task->tk_start);
1579 }
1580 
1581 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1582 		struct nfs_pgio_header *hdr)
1583 {
1584 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1585 		return;
1586 	nfs4_ff_layout_stat_io_end_write(task,
1587 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1588 			hdr->args.count, hdr->res.count,
1589 			hdr->res.verf->committed);
1590 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1591 }
1592 
1593 static int ff_layout_write_prepare_common(struct rpc_task *task,
1594 					  struct nfs_pgio_header *hdr)
1595 {
1596 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1597 		rpc_exit(task, -EIO);
1598 		return -EIO;
1599 	}
1600 
1601 	ff_layout_write_record_layoutstats_start(task, hdr);
1602 	return 0;
1603 }
1604 
1605 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1606 {
1607 	struct nfs_pgio_header *hdr = data;
1608 
1609 	if (ff_layout_write_prepare_common(task, hdr))
1610 		return;
1611 
1612 	rpc_call_start(task);
1613 }
1614 
1615 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1616 {
1617 	struct nfs_pgio_header *hdr = data;
1618 
1619 	if (nfs4_setup_sequence(hdr->ds_clp,
1620 				&hdr->args.seq_args,
1621 				&hdr->res.seq_res,
1622 				task))
1623 		return;
1624 
1625 	ff_layout_write_prepare_common(task, hdr);
1626 }
1627 
1628 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1629 {
1630 	struct nfs_pgio_header *hdr = data;
1631 
1632 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1633 	    task->tk_status == 0) {
1634 		nfs4_sequence_done(task, &hdr->res.seq_res);
1635 		return;
1636 	}
1637 
1638 	/* Note this may cause RPC to be resent */
1639 	hdr->mds_ops->rpc_call_done(task, hdr);
1640 }
1641 
1642 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1643 {
1644 	struct nfs_pgio_header *hdr = data;
1645 
1646 	ff_layout_write_record_layoutstats_done(task, hdr);
1647 	rpc_count_iostats_metrics(task,
1648 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1649 }
1650 
1651 static void ff_layout_write_release(void *data)
1652 {
1653 	struct nfs_pgio_header *hdr = data;
1654 
1655 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1656 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1657 		ff_layout_send_layouterror(hdr->lseg);
1658 		ff_layout_reset_write(hdr, true);
1659 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1660 		ff_layout_reset_write(hdr, false);
1661 	pnfs_generic_rw_release(data);
1662 }
1663 
1664 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1665 		struct nfs_commit_data *cdata)
1666 {
1667 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1668 		return;
1669 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1670 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1671 			0, task->tk_start);
1672 }
1673 
1674 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1675 		struct nfs_commit_data *cdata)
1676 {
1677 	struct nfs_page *req;
1678 	__u64 count = 0;
1679 
1680 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1681 		return;
1682 
1683 	if (task->tk_status == 0) {
1684 		list_for_each_entry(req, &cdata->pages, wb_list)
1685 			count += req->wb_bytes;
1686 	}
1687 	nfs4_ff_layout_stat_io_end_write(task,
1688 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1689 			count, count, NFS_FILE_SYNC);
1690 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1691 }
1692 
1693 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1694 		struct nfs_commit_data *cdata)
1695 {
1696 	ff_layout_commit_record_layoutstats_start(task, cdata);
1697 }
1698 
1699 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1700 {
1701 	ff_layout_commit_prepare_common(task, data);
1702 	rpc_call_start(task);
1703 }
1704 
1705 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1706 {
1707 	struct nfs_commit_data *wdata = data;
1708 
1709 	if (nfs4_setup_sequence(wdata->ds_clp,
1710 				&wdata->args.seq_args,
1711 				&wdata->res.seq_res,
1712 				task))
1713 		return;
1714 	ff_layout_commit_prepare_common(task, data);
1715 }
1716 
1717 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1718 {
1719 	pnfs_generic_write_commit_done(task, data);
1720 }
1721 
1722 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1723 {
1724 	struct nfs_commit_data *cdata = data;
1725 
1726 	ff_layout_commit_record_layoutstats_done(task, cdata);
1727 	rpc_count_iostats_metrics(task,
1728 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1729 }
1730 
1731 static void ff_layout_commit_release(void *data)
1732 {
1733 	struct nfs_commit_data *cdata = data;
1734 
1735 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1736 	pnfs_generic_commit_release(data);
1737 }
1738 
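/*
 * rpc_call_ops tables wired into the data-server RPCs: the v3 variants start
 * the call immediately, while the v4 variants first claim an NFSv4.1 session
 * slot via nfs4_setup_sequence().
 */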
1739 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1740 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1741 	.rpc_call_done = ff_layout_read_call_done,
1742 	.rpc_count_stats = ff_layout_read_count_stats,
1743 	.rpc_release = ff_layout_read_release,
1744 };
1745 
1746 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1747 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1748 	.rpc_call_done = ff_layout_read_call_done,
1749 	.rpc_count_stats = ff_layout_read_count_stats,
1750 	.rpc_release = ff_layout_read_release,
1751 };
1752 
1753 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1754 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1755 	.rpc_call_done = ff_layout_write_call_done,
1756 	.rpc_count_stats = ff_layout_write_count_stats,
1757 	.rpc_release = ff_layout_write_release,
1758 };
1759 
1760 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1761 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1762 	.rpc_call_done = ff_layout_write_call_done,
1763 	.rpc_count_stats = ff_layout_write_count_stats,
1764 	.rpc_release = ff_layout_write_release,
1765 };
1766 
1767 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1768 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1769 	.rpc_call_done = ff_layout_commit_done,
1770 	.rpc_count_stats = ff_layout_commit_count_stats,
1771 	.rpc_release = ff_layout_commit_release,
1772 };
1773 
1774 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1775 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1776 	.rpc_call_done = ff_layout_commit_done,
1777 	.rpc_count_stats = ff_layout_commit_count_stats,
1778 	.rpc_release = ff_layout_commit_release,
1779 };
1780 
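/*
 * Issue an asynchronous READ to the data server backing the selected mirror.
 * On any setup failure the caller either retries through pNFS
 * (PNFS_TRY_AGAIN) or falls back to the MDS (PNFS_NOT_ATTEMPTED).
 */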
1781 static enum pnfs_try_status
1782 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1783 {
1784 	struct pnfs_layout_segment *lseg = hdr->lseg;
1785 	struct nfs4_pnfs_ds *ds;
1786 	struct rpc_clnt *ds_clnt;
1787 	struct nfs4_ff_layout_mirror *mirror;
1788 	const struct cred *ds_cred;
1789 	loff_t offset = hdr->args.offset;
1790 	u32 idx = hdr->pgio_mirror_idx;
1791 	int vers;
1792 	struct nfs_fh *fh;
1793 
1794 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1795 		__func__, hdr->inode->i_ino,
1796 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1797 
1798 	mirror = FF_LAYOUT_COMP(lseg, idx);
1799 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1800 	if (!ds)
1801 		goto out_failed;
1802 
1803 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1804 						   hdr->inode);
1805 	if (IS_ERR(ds_clnt))
1806 		goto out_failed;
1807 
1808 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1809 	if (!ds_cred)
1810 		goto out_failed;
1811 
1812 	vers = nfs4_ff_layout_ds_version(mirror);
1813 
1814 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1815 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1816 
1817 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1818 	refcount_inc(&ds->ds_clp->cl_count);
1819 	hdr->ds_clp = ds->ds_clp;
1820 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1821 	if (fh)
1822 		hdr->args.fh = fh;
1823 
1824 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1825 
1826 	/*
1827 	 * Note that if we ever decide to split across DSes,
1828 	 * then we may need to handle dense-like offsets.
1829 	 */
1830 	hdr->args.offset = offset;
1831 	hdr->mds_offset = offset;
1832 
1833 	/* Perform an asynchronous read to ds */
1834 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1835 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1836 				      &ff_layout_read_call_ops_v4,
1837 			  0, RPC_TASK_SOFTCONN);
1838 	put_cred(ds_cred);
1839 	return PNFS_ATTEMPTED;
1840 
1841 out_failed:
1842 	if (ff_layout_avoid_mds_available_ds(lseg))
1843 		return PNFS_TRY_AGAIN;
1844 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1845 			hdr->args.offset, hdr->args.count,
1846 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1847 	return PNFS_NOT_ATTEMPTED;
1848 }
1849 
1850 /* Perform async writes. */
1851 static enum pnfs_try_status
1852 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1853 {
1854 	struct pnfs_layout_segment *lseg = hdr->lseg;
1855 	struct nfs4_pnfs_ds *ds;
1856 	struct rpc_clnt *ds_clnt;
1857 	struct nfs4_ff_layout_mirror *mirror;
1858 	const struct cred *ds_cred;
1859 	loff_t offset = hdr->args.offset;
1860 	int vers;
1861 	struct nfs_fh *fh;
1862 	int idx = hdr->pgio_mirror_idx;
1863 
1864 	mirror = FF_LAYOUT_COMP(lseg, idx);
1865 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1866 	if (!ds)
1867 		goto out_failed;
1868 
1869 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1870 						   hdr->inode);
1871 	if (IS_ERR(ds_clnt))
1872 		goto out_failed;
1873 
1874 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1875 	if (!ds_cred)
1876 		goto out_failed;
1877 
1878 	vers = nfs4_ff_layout_ds_version(mirror);
1879 
1880 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1881 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1882 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1883 		vers);
1884 
1885 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1886 	refcount_inc(&ds->ds_clp->cl_count);
1887 	hdr->ds_clp = ds->ds_clp;
1888 	hdr->ds_commit_idx = idx;
1889 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1890 	if (fh)
1891 		hdr->args.fh = fh;
1892 
1893 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1894 
1895 	/*
1896 	 * Note that if we ever decide to split across DSes,
1897 	 * then we may need to handle dense-like offsets.
1898 	 */
1899 	hdr->args.offset = offset;
1900 
1901 	/* Perform an asynchronous write */
1902 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1903 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1904 				      &ff_layout_write_call_ops_v4,
1905 			  sync, RPC_TASK_SOFTCONN);
1906 	put_cred(ds_cred);
1907 	return PNFS_ATTEMPTED;
1908 
1909 out_failed:
1910 	if (ff_layout_avoid_mds_available_ds(lseg))
1911 		return PNFS_TRY_AGAIN;
1912 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1913 			hdr->args.offset, hdr->args.count,
1914 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1915 	return PNFS_NOT_ATTEMPTED;
1916 }
1917 
1918 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1919 {
1920 	return i;
1921 }
1922 
1923 static struct nfs_fh *
1924 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1925 {
1926 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1927 
1928 	/* FIXME: Assume that there is only one NFS version available
1929 	 * for the DS.
1930 	 */
1931 	return &flseg->mirror_array[i]->fh_versions[0];
1932 }
1933 
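/*
 * Send a COMMIT to the data server that holds the dirty data for this commit
 * bucket.  Any setup failure hands the requests back to the generic code so
 * they can be resent through the MDS.
 */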
1934 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1935 {
1936 	struct pnfs_layout_segment *lseg = data->lseg;
1937 	struct nfs4_pnfs_ds *ds;
1938 	struct rpc_clnt *ds_clnt;
1939 	struct nfs4_ff_layout_mirror *mirror;
1940 	const struct cred *ds_cred;
1941 	u32 idx;
1942 	int vers, ret;
1943 	struct nfs_fh *fh;
1944 
1945 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1946 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1947 		goto out_err;
1948 
1949 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1950 	mirror = FF_LAYOUT_COMP(lseg, idx);
1951 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1952 	if (!ds)
1953 		goto out_err;
1954 
1955 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1956 						   data->inode);
1957 	if (IS_ERR(ds_clnt))
1958 		goto out_err;
1959 
1960 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1961 	if (!ds_cred)
1962 		goto out_err;
1963 
1964 	vers = nfs4_ff_layout_ds_version(mirror);
1965 
1966 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1967 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1968 		vers);
1969 	data->commit_done_cb = ff_layout_commit_done_cb;
1970 	data->cred = ds_cred;
1971 	refcount_inc(&ds->ds_clp->cl_count);
1972 	data->ds_clp = ds->ds_clp;
1973 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1974 	if (fh)
1975 		data->args.fh = fh;
1976 
1977 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1978 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1979 					       &ff_layout_commit_call_ops_v4,
1980 				   how, RPC_TASK_SOFTCONN);
1981 	put_cred(ds_cred);
1982 	return ret;
1983 out_err:
1984 	pnfs_generic_prepare_to_resend_writes(data);
1985 	pnfs_generic_commit_release(data);
1986 	return -EAGAIN;
1987 }
1988 
1989 static int
1990 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1991 			   int how, struct nfs_commit_info *cinfo)
1992 {
1993 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1994 					    ff_layout_initiate_commit);
1995 }
1996 
1997 static struct pnfs_ds_commit_info *
1998 ff_layout_get_ds_info(struct inode *inode)
1999 {
2000 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2001 
2002 	if (layout == NULL)
2003 		return NULL;
2004 
2005 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2006 }
2007 
2008 static void
2009 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2010 {
2011 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2012 						  id_node));
2013 }
2014 
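/*
 * Encode the accumulated data-server I/O errors for LAYOUTRETURN: a 4-byte
 * entry count followed by one entry per recorded error.
 */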
2015 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2016 				  const struct nfs4_layoutreturn_args *args,
2017 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2018 {
2019 	__be32 *start;
2020 
2021 	start = xdr_reserve_space(xdr, 4);
2022 	if (unlikely(!start))
2023 		return -E2BIG;
2024 
2025 	*start = cpu_to_be32(ff_args->num_errors);
2026 	/* This assumes we always return _ALL_ layouts */
2027 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2028 }
2029 
2030 static void
2031 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2032 {
2033 	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2034 }
2035 
2036 static void
2037 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2038 			    const nfs4_stateid *stateid,
2039 			    const struct nfs42_layoutstat_devinfo *devinfo)
2040 {
2041 	__be32 *p;
2042 
2043 	p = xdr_reserve_space(xdr, 8 + 8);
2044 	p = xdr_encode_hyper(p, devinfo->offset);
2045 	p = xdr_encode_hyper(p, devinfo->length);
2046 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2047 	p = xdr_reserve_space(xdr, 4*8);
2048 	p = xdr_encode_hyper(p, devinfo->read_count);
2049 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2050 	p = xdr_encode_hyper(p, devinfo->write_count);
2051 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2052 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2053 }
2054 
2055 static void
2056 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2057 			    const nfs4_stateid *stateid,
2058 			    const struct nfs42_layoutstat_devinfo *devinfo)
2059 {
2060 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2061 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2062 			devinfo->ld_private.data);
2063 }
2064 
2065 /* Encode the per-device iostats gathered for this layoutreturn */
2066 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2067 		const struct nfs4_layoutreturn_args *args,
2068 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2069 {
2070 	__be32 *p;
2071 	int i;
2072 
2073 	p = xdr_reserve_space(xdr, 4);
2074 	*p = cpu_to_be32(ff_args->num_dev);
2075 	for (i = 0; i < ff_args->num_dev; i++)
2076 		ff_layout_encode_ff_iostat(xdr,
2077 				&args->layout->plh_stateid,
2078 				&ff_args->devinfo[i]);
2079 }
2080 
2081 static void
2082 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2083 		unsigned int num_entries)
2084 {
2085 	unsigned int i;
2086 
2087 	for (i = 0; i < num_entries; i++) {
2088 		if (!devinfo[i].ld_private.ops)
2089 			continue;
2090 		if (!devinfo[i].ld_private.ops->free)
2091 			continue;
2092 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2093 	}
2094 }
2095 
2096 static struct nfs4_deviceid_node *
2097 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2098 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2099 {
2100 	struct nfs4_ff_layout_ds *dsaddr;
2101 
2102 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2103 	if (!dsaddr)
2104 		return NULL;
2105 	return &dsaddr->id_node;
2106 }
2107 
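/*
 * Private layoutreturn payload: the ioerr and iostats arrays are first
 * encoded into a scratch page through a temporary xdr_stream, then spliced
 * into the main stream as a length-prefixed opaque body.
 */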
2108 static void
2109 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2110 		const void *voidargs,
2111 		const struct nfs4_xdr_opaque_data *ff_opaque)
2112 {
2113 	const struct nfs4_layoutreturn_args *args = voidargs;
2114 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2115 	struct xdr_buf tmp_buf = {
2116 		.head = {
2117 			[0] = {
2118 				.iov_base = page_address(ff_args->pages[0]),
2119 			},
2120 		},
2121 		.buflen = PAGE_SIZE,
2122 	};
2123 	struct xdr_stream tmp_xdr;
2124 	__be32 *start;
2125 
2126 	dprintk("%s: Begin\n", __func__);
2127 
2128 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2129 
2130 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2131 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2132 
2133 	start = xdr_reserve_space(xdr, 4);
2134 	*start = cpu_to_be32(tmp_buf.len);
2135 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2136 
2137 	dprintk("%s: Return\n", __func__);
2138 }
2139 
2140 static void
2141 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2142 {
2143 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2144 
2145 	if (!args->data)
2146 		return;
2147 	ff_args = args->data;
2148 	args->data = NULL;
2149 
2150 	ff_layout_free_ds_ioerr(&ff_args->errors);
2151 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2152 
2153 	put_page(ff_args->pages[0]);
2154 	kfree(ff_args);
2155 }
2156 
2157 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2158 	.encode = ff_layout_encode_layoutreturn,
2159 	.free = ff_layout_free_layoutreturn,
2160 };
2161 
2162 static int
2163 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2164 {
2165 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2166 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2167 
2168 	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2169 	if (!ff_args)
2170 		goto out_nomem;
2171 	ff_args->pages[0] = alloc_page(GFP_KERNEL);
2172 	if (!ff_args->pages[0])
2173 		goto out_nomem_free;
2174 
2175 	INIT_LIST_HEAD(&ff_args->errors);
2176 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2177 			&args->range, &ff_args->errors,
2178 			FF_LAYOUTRETURN_MAXERR);
2179 
2180 	spin_lock(&args->inode->i_lock);
2181 	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2182 			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2183 	spin_unlock(&args->inode->i_lock);
2184 
2185 	args->ld_private->ops = &layoutreturn_ops;
2186 	args->ld_private->data = ff_args;
2187 	return 0;
2188 out_nomem_free:
2189 	kfree(ff_args);
2190 out_nomem:
2191 	return -ENOMEM;
2192 }
2193 
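/*
 * If the server supports LAYOUTERROR (NFSv4.2 only; the !CONFIG_NFS_V4_2
 * build uses the empty stub below), drain the recorded data-server errors
 * for this segment and report them in batches of up to
 * NFS42_LAYOUTERROR_MAX entries per call.
 */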
2194 #ifdef CONFIG_NFS_V4_2
2195 void
2196 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2197 {
2198 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2199 	struct nfs42_layout_error *errors;
2200 	LIST_HEAD(head);
2201 
2202 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2203 		return;
2204 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2205 	if (list_empty(&head))
2206 		return;
2207 
2208 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2209 			sizeof(*errors), GFP_NOFS);
2210 	if (errors != NULL) {
2211 		const struct nfs4_ff_layout_ds_err *pos;
2212 		size_t n = 0;
2213 
2214 		list_for_each_entry(pos, &head, list) {
2215 			errors[n].offset = pos->offset;
2216 			errors[n].length = pos->length;
2217 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2218 			errors[n].errors[0].dev_id = pos->deviceid;
2219 			errors[n].errors[0].status = pos->status;
2220 			errors[n].errors[0].opnum = pos->opnum;
2221 			n++;
2222 			if (!list_is_last(&pos->list, &head) &&
2223 			    n < NFS42_LAYOUTERROR_MAX)
2224 				continue;
2225 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2226 				break;
2227 			n = 0;
2228 		}
2229 		kfree(errors);
2230 	}
2231 	ff_layout_free_ds_ioerr(&head);
2232 }
2233 #else
2234 void
2235 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2236 {
2237 }
2238 #endif
2239 
2240 static int
2241 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2242 {
2243 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2244 
2245 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2246 }
2247 
2248 static size_t
2249 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2250 			  const int buflen)
2251 {
2252 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2253 	const struct in6_addr *addr = &sin6->sin6_addr;
2254 
2255 	/*
2256 	 * RFC 4291, Section 2.2.2
2257 	 *
2258 	 * Shorthanded ANY address
2259 	 */
2260 	if (ipv6_addr_any(addr))
2261 		return snprintf(buf, buflen, "::");
2262 
2263 	/*
2264 	 * RFC 4291, Section 2.2.2
2265 	 *
2266 	 * Shorthanded loopback address
2267 	 */
2268 	if (ipv6_addr_loopback(addr))
2269 		return snprintf(buf, buflen, "::1");
2270 
2271 	/*
2272 	 * RFC 4291, Section 2.2.3
2273 	 *
2274 	 * Special presentation address format for mapped v4
2275 	 * addresses.
2276 	 */
2277 	if (ipv6_addr_v4mapped(addr))
2278 		return snprintf(buf, buflen, "::ffff:%pI4",
2279 					&addr->s6_addr32[3]);
2280 
2281 	/*
2282 	 * RFC 4291, Section 2.2.1
2283 	 */
2284 	return snprintf(buf, buflen, "%pI6c", addr);
2285 }
2286 
2287 /* Derived from rpc_sockaddr2uaddr */
2288 static void
2289 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2290 {
2291 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2292 	char portbuf[RPCBIND_MAXUADDRPLEN];
2293 	char addrbuf[RPCBIND_MAXUADDRLEN];
2294 	char *netid;
2295 	unsigned short port;
2296 	int len, netid_len;
2297 	__be32 *p;
2298 
2299 	switch (sap->sa_family) {
2300 	case AF_INET:
2301 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2302 			return;
2303 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2304 		netid = "tcp";
2305 		netid_len = 3;
2306 		break;
2307 	case AF_INET6:
2308 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2309 			return;
2310 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2311 		netid = "tcp6";
2312 		netid_len = 4;
2313 		break;
2314 	default:
2315 		/* we only support tcp and tcp6 */
2316 		WARN_ON_ONCE(1);
2317 		return;
2318 	}
2319 
2320 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2321 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2322 
2323 	p = xdr_reserve_space(xdr, 4 + netid_len);
2324 	xdr_encode_opaque(p, netid, netid_len);
2325 
2326 	p = xdr_reserve_space(xdr, 4 + len);
2327 	xdr_encode_opaque(p, addrbuf, len);
2328 }
2329 
2330 static void
2331 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2332 			 ktime_t t)
2333 {
2334 	struct timespec64 ts;
2335 	__be32 *p;
2336 
2337 	p = xdr_reserve_space(xdr, 12);
2338 	ts = ktime_to_timespec64(t);
2339 	p = xdr_encode_hyper(p, ts.tv_sec);
2340 	*p++ = cpu_to_be32(ts.tv_nsec);
2341 }
2342 
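/*
 * Encode one ff_io_latency4-style block: five 64-bit counters followed by
 * the busy and aggregate-completion times as nfstime4 values.
 */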
2343 static void
2344 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2345 			    struct nfs4_ff_io_stat *stat)
2346 {
2347 	__be32 *p;
2348 
2349 	p = xdr_reserve_space(xdr, 5 * 8);
2350 	p = xdr_encode_hyper(p, stat->ops_requested);
2351 	p = xdr_encode_hyper(p, stat->bytes_requested);
2352 	p = xdr_encode_hyper(p, stat->ops_completed);
2353 	p = xdr_encode_hyper(p, stat->bytes_completed);
2354 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2355 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2356 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2357 }
2358 
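/*
 * Encode a single mirror's layoutupdate body: the data server's first
 * netaddr and filehandle, its read and write latency statistics (sampled
 * under mirror->lock), the time elapsed since the mirror was set up, and a
 * final boolean that is always encoded as false here.
 */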
2359 static void
2360 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2361 			      const struct nfs42_layoutstat_devinfo *devinfo,
2362 			      struct nfs4_ff_layout_mirror *mirror)
2363 {
2364 	struct nfs4_pnfs_ds_addr *da;
2365 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2366 	struct nfs_fh *fh = &mirror->fh_versions[0];
2367 	__be32 *p;
2368 
2369 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2370 	dprintk("%s: DS %s: encoding address %s\n",
2371 		__func__, ds->ds_remotestr, da->da_remotestr);
2372 	/* netaddr4 */
2373 	ff_layout_encode_netaddr(xdr, da);
2374 	/* nfs_fh4 */
2375 	p = xdr_reserve_space(xdr, 4 + fh->size);
2376 	xdr_encode_opaque(p, fh->data, fh->size);
2377 	/* ff_io_latency4 read */
2378 	spin_lock(&mirror->lock);
2379 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2380 	/* ff_io_latency4 write */
2381 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2382 	spin_unlock(&mirror->lock);
2383 	/* nfstime4 */
2384 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2385 	/* bool */
2386 	p = xdr_reserve_space(xdr, 4);
2387 	*p = cpu_to_be32(false);
2388 }
2389 
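/*
 * Encode the opaque layoutupdate blob for LAYOUTSTATS and patch the 4-byte
 * length that precedes it once the body size is known.
 */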
2390 static void
2391 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2392 			     const struct nfs4_xdr_opaque_data *opaque)
2393 {
2394 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2395 			struct nfs42_layoutstat_devinfo, ld_private);
2396 	__be32 *start;
2397 
2398 	/* layoutupdate length */
2399 	start = xdr_reserve_space(xdr, 4);
2400 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2401 
2402 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2403 }
2404 
2405 static void
2406 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2407 {
2408 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2409 
2410 	ff_layout_put_mirror(mirror);
2411 }
2412 
2413 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2414 	.encode = ff_layout_encode_layoutstats,
2415 	.free	= ff_layout_free_layoutstats,
2416 };
2417 
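/*
 * Walk the layout's mirror list and fill in up to dev_limit devinfo entries.
 * Only mirrors with fresh statistics (NFS4_FF_MIRROR_STAT_AVAIL) are
 * reported; each selected mirror holds an extra reference that is dropped
 * when the entry's ld_private is freed.
 */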
2418 static int
2419 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2420 			       struct nfs42_layoutstat_devinfo *devinfo,
2421 			       int dev_limit)
2422 {
2423 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2424 	struct nfs4_ff_layout_mirror *mirror;
2425 	struct nfs4_deviceid_node *dev;
2426 	int i = 0;
2427 
2428 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2429 		if (i >= dev_limit)
2430 			break;
2431 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2432 			continue;
2433 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2434 			continue;
2435 		/* mirror refcount put in cleanup_layoutstats */
2436 		if (!refcount_inc_not_zero(&mirror->ref))
2437 			continue;
2438 		dev = &mirror->mirror_ds->id_node;
2439 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2440 		devinfo->offset = 0;
2441 		devinfo->length = NFS4_MAX_UINT64;
2442 		spin_lock(&mirror->lock);
2443 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2444 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2445 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2446 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2447 		spin_unlock(&mirror->lock);
2448 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2449 		devinfo->ld_private.ops = &layoutstat_ops;
2450 		devinfo->ld_private.data = mirror;
2451 
2452 		devinfo++;
2453 		i++;
2454 	}
2455 	return i;
2456 }
2457 
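/*
 * Gather per-mirror statistics for a LAYOUTSTATS call.  Returns -ENOENT when
 * no mirror has anything new to report so the caller can skip the RPC.
 */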
2458 static int
2459 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2460 {
2461 	struct nfs4_flexfile_layout *ff_layout;
2462 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2463 
2464 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2465 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2466 	if (!args->devinfo)
2467 		return -ENOMEM;
2468 
2469 	spin_lock(&args->inode->i_lock);
2470 	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2471 	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2472 			&args->devinfo[0], dev_count);
2473 	spin_unlock(&args->inode->i_lock);
2474 	if (!args->num_dev) {
2475 		kfree(args->devinfo);
2476 		args->devinfo = NULL;
2477 		return -ENOENT;
2478 	}
2479 
2480 	return 0;
2481 }
2482 
2483 static int
2484 ff_layout_set_layoutdriver(struct nfs_server *server,
2485 		const struct nfs_fh *dummy)
2486 {
2487 #if IS_ENABLED(CONFIG_NFS_V4_2)
2488 	server->caps |= NFS_CAP_LAYOUTSTATS;
2489 #endif
2490 	return 0;
2491 }
2492 
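/*
 * pNFS layout driver operations for LAYOUT_FLEX_FILES.  Commit bookkeeping
 * reuses the generic pNFS helpers, while read, write and commit I/O are
 * redirected to the data servers by the callbacks above.
 */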
2493 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2494 	.id			= LAYOUT_FLEX_FILES,
2495 	.name			= "LAYOUT_FLEX_FILES",
2496 	.owner			= THIS_MODULE,
2497 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2498 	.max_layoutget_response	= 4096, /* 1 page or so... */
2499 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2500 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2501 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2502 	.alloc_lseg		= ff_layout_alloc_lseg,
2503 	.free_lseg		= ff_layout_free_lseg,
2504 	.add_lseg		= ff_layout_add_lseg,
2505 	.pg_read_ops		= &ff_layout_pg_read_ops,
2506 	.pg_write_ops		= &ff_layout_pg_write_ops,
2507 	.get_ds_info		= ff_layout_get_ds_info,
2508 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2509 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2510 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2511 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2512 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2513 	.commit_pagelist	= ff_layout_commit_pagelist,
2514 	.read_pagelist		= ff_layout_read_pagelist,
2515 	.write_pagelist		= ff_layout_write_pagelist,
2516 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2517 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2518 	.sync			= pnfs_nfs_generic_sync,
2519 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2520 };
2521 
2522 static int __init nfs4flexfilelayout_init(void)
2523 {
2524 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2525 	       __func__);
2526 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2527 }
2528 
2529 static void __exit nfs4flexfilelayout_exit(void)
2530 {
2531 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2532 	       __func__);
2533 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2534 }
2535 
2536 MODULE_ALIAS("nfs-layouttype4-4");
2537 
2538 MODULE_LICENSE("GPL");
2539 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2540 
2541 module_init(nfs4flexfilelayout_init);
2542 module_exit(nfs4flexfilelayout_exit);
2543 
2544 module_param(io_maxretrans, ushort, 0644);
2545 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2546 			"retries an I/O request before returning an error.");
2547