/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)

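/*
 * Layout header bookkeeping: struct nfs4_flexfile_layout wraps the
 * generic pNFS layout header and carries the per-layout mirror list
 * plus a list of recorded data-server errors.
 */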
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (!ffl)
		return NULL;
	INIT_LIST_HEAD(&ffl->error_list);
	INIT_LIST_HEAD(&ffl->mirrors);
	return &ffl->generic_hdr;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

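/*
 * XDR decode helpers for the flexfile layout body returned by
 * LAYOUTGET: stateids, deviceids, filehandles, and stringified
 * user/group names.
 */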
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	memcpy(stateid, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * That is, Kerberos is not supported to the DSes, so there are no
 * principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4) */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

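/*
 * Two mirrors match if they describe the same set of filehandles:
 * equal counts, and every fh version in m1 is also present in m2.
 */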
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

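/*
 * Insert a freshly decoded mirror into the layout's mirror list, or, if
 * an equivalent mirror (same DS and filehandles) already exists and can
 * still be referenced, return the existing one so callers share it.
 */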
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (mirror->mirror_ds != pos->mirror_ds)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	if (mirror->cred)
		put_rpccred(mirror->cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node, but we still do it here
			 * for the .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

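/*
 * Decide whether two layout segments of the same iomode can be merged;
 * on success 'new' absorbs 'old' (its range is adjusted and it inherits
 * old's ROC and LAYOUTRETURN flags) and the caller can free 'old'.
 */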
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

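/* Order the mirror array so the most efficient mirrors come first. */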
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

static void ff_layout_mark_devices_valid(struct nfs4_ff_layout_segment *fls)
{
	struct nfs4_deviceid_node *node;
	int i;

	if (!(fls->flags & FF_FLAGS_NO_IO_THRU_MDS))
		return;
	for (i = 0; i < fls->mirror_array_cnt; i++) {
		node = &fls->mirror_array[i]->mirror_ds->id_node;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
}

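/*
 * Decode a flexfile layout segment from the LAYOUTGET response. The
 * XDR body parsed below is, in order:
 *
 *   stripe_unit(8) | mirror_array_cnt(4)
 *   per mirror: ds_count(4) | deviceid | efficiency(4) | stateid |
 *               fh_count(4) + fh_count filehandles | user | group
 *   flags(4)  (optional trailer)
 */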
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		u32 ds_count;
		u32 fh_count;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * Upon success, mirror_ds was allocated either by an
		 * earlier GETDEVICEINFO or freshly by .alloc_deviceid_node;
		 * a nfs4_find_get_deviceid() failure is in fact a
		 * GETDEVICEINFO failure.
		 */
		if (idnode)
			fls->mirror_array[i]->mirror_ds =
				FF_LAYOUT_MIRROR_DS(idnode);
		else
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &fls->mirror_array[i]->uid);
		if (rc)
			goto out_err_free;

		/* group */
		rc = decode_name(&stream, &fls->mirror_array[i]->gid);
		if (rc)
			goto out_err_free;

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: uid %d gid %d\n", __func__,
			fls->mirror_array[i]->uid,
			fls->mirror_array[i]->gid);
	}

	p = xdr_inline_decode(&stream, 4);
	if (p)
		fls->flags = be32_to_cpup(p);

	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ff_layout_mark_devices_valid(fls);

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

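/*
 * Layoutstats plumbing: the busy timer measures how long at least one
 * I/O has been outstanding against a mirror, and start_io decides when
 * the accumulated statistics should be reported to the MDS.
 */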
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	static const ktime_t notime = {0};
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (ktime_equal(mirror->last_report_time, notime))
		mirror->last_report_time = now;
	if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
			report_interval) {
		mirror->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

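/*
 * Lazily allocate the commit buckets for this layout: one bucket per
 * mirror (times the lseg count, currently always 1). A racing
 * allocation is resolved under cinfo->lock by keeping the first one.
 */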
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lsegs per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamically
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(cinfo->lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(cinfo->lock);
		return 0;
	}
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_READ,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds)
		goto out_mds;
	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
		goto out_eagain;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds)
			goto out_mds;
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;
out_eagain:
	pnfs_generic_pg_cleanup(pgio);
	pgio->pg_error = -EAGAIN;
	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
	pgio->pg_error = -EAGAIN;
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

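/*
 * Redirect a failed pNFS request: either resend it through pNFS
 * (retry_pnfs) or fall back to I/O through the MDS. Direct I/O and
 * buffered I/O need different resend plumbing, handled below.
 */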
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		if (!hdr->dreq) {
			struct nfs_open_context *ctx;

			ctx = nfs_list_entry(hdr->pages.next)->wb_context;
			set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
			hdr->completion_ops->error_cleanup(&hdr->pages);
		} else {
			nfs_direct_set_resched_writes(hdr->dreq);
			/* fake an unstable write to let the common NFS
			 * code resend the pages */
			hdr->verf.committed = NFS_UNSTABLE;
			hdr->good_bytes = hdr->args.count;
		}
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

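/*
 * Async error handling. The handlers below map an RPC error onto one
 * of four outcomes: 0 (no further action: success, or a fatal error
 * already recorded in tk_status), -EAGAIN (retry the RPC once any
 * recovery completes), -NFS4ERR_RESET_TO_PNFS (retry via pNFS), or
 * -NFS4ERR_RESET_TO_MDS (resend the I/O through the MDS).
 */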
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
		/* fall through */
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_no_fallback_to_mds(lseg) ||
		    ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
			&hdr->lseg->pls_layout->plh_flags);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfile layout client should treat a DS-replied FILE_SYNC as
 * DATA_SYNC, so to follow
 * http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	if (!ff_layout_need_layoutcommit(hdr->lseg))
		return;

	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
			hdr->mds_offset + hdr->res.count);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}

static bool
ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		if (ff_layout_has_available_ds(hdr->lseg))
			pnfs_read_resend_pnfs(hdr);
		else
			ff_layout_reset_read(hdr);
		rpc_exit(task, 0);
		return -EAGAIN;
	}
	hdr->pgio_done_cb = ff_layout_read_done_cb;

	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					   args,
					   res,
					   task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				   args,
				   res,
				   task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		ff_layout_set_layoutcommit(hdr);

	/* zero out the fattr since we don't care about DS attrs at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int ff_layout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_set_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_clear_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (ff_layout_need_layoutcommit(data->lseg))
		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		bool retry_pnfs;

		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
		dprintk("%s task %u reset io to %s\n", __func__,
			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
		ff_layout_reset_write(hdr, retry_pnfs);
		rpc_exit(task, 0);
		return -EAGAIN;
	}

	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				 &wdata->args.seq_args,
				 &wdata->res.seq_res,
				 task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;
	struct nfs_page *req;
	__u64 count = 0;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);

	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

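/*
 * RPC call ops tables, one pair per operation: the v3 variants start
 * the call directly, while the v4 variants first run the NFSv4.x
 * sequence setup via ff_layout_setup_sequence().
 */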
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

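/*
 * I/O initiation: pick the data server for the mirror, obtain (or
 * create) an RPC client and credential for it, then fire the
 * asynchronous READ/WRITE/COMMIT. RPC_TASK_SOFTCONN makes connection
 * failures surface quickly instead of being retried indefinitely.
 */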
1622 static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header * hdr)1623 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1624 {
1625 	struct pnfs_layout_segment *lseg = hdr->lseg;
1626 	struct nfs4_pnfs_ds *ds;
1627 	struct rpc_clnt *ds_clnt;
1628 	struct rpc_cred *ds_cred;
1629 	loff_t offset = hdr->args.offset;
1630 	u32 idx = hdr->pgio_mirror_idx;
1631 	int vers;
1632 	struct nfs_fh *fh;
1633 
1634 	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1635 		__func__, hdr->inode->i_ino,
1636 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1637 
1638 	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1639 	if (!ds)
1640 		goto out_failed;
1641 
1642 	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1643 						   hdr->inode);
1644 	if (IS_ERR(ds_clnt))
1645 		goto out_failed;
1646 
1647 	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1648 	if (IS_ERR(ds_cred))
1649 		goto out_failed;
1650 
1651 	vers = nfs4_ff_layout_ds_version(lseg, idx);
1652 
1653 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1654 		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1655 
1656 	atomic_inc(&ds->ds_clp->cl_count);
1657 	hdr->ds_clp = ds->ds_clp;
1658 	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1659 	if (fh)
1660 		hdr->args.fh = fh;
1661 	/*
1662 	 * Note that if we ever decide to split across DSes,
1663 	 * then we may need to handle dense-like offsets.
1664 	 */
1665 	hdr->args.offset = offset;
1666 	hdr->mds_offset = offset;
1667 
1668 	/* Perform an asynchronous read to ds */
1669 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1670 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1671 				      &ff_layout_read_call_ops_v4,
1672 			  0, RPC_TASK_SOFTCONN);
1673 
1674 	return PNFS_ATTEMPTED;
1675 
1676 out_failed:
1677 	if (ff_layout_has_available_ds(lseg))
1678 		return PNFS_TRY_AGAIN;
1679 	return PNFS_NOT_ATTEMPTED;
1680 }
1681 
1682 /* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t)hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}

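/*
 * The flexfile layout does not remap commit buckets: the commit index is
 * used directly as the mirror/DS index.
 */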
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assumes that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

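/*
 * Send COMMIT to the data server backing a commit bucket. On any setup
 * failure the queued writes are marked for resend through the MDS and
 * -EAGAIN is returned.
 */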
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (IS_ERR(ds_cred))
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

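/*
 * Thin wrapper: the generic pNFS commit machinery walks the commit lists
 * and calls back into ff_layout_initiate_commit() for each bucket.
 */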
static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
				  struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
	__be32 *start;
	int count = 0, ret = 0;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	/* This assumes we always return _ALL_ layouts */
	spin_lock(&hdr->plh_inode->i_lock);
	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
	spin_unlock(&hdr->plh_inode->i_lock);

	*start = cpu_to_be32(count);

	return ret;
}

/* report nothing for now */
static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
				     struct xdr_stream *xdr,
				     const struct nfs4_layoutreturn_args *args)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	if (likely(p))
		*p = cpu_to_be32(0);
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

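/*
 * Encode the flexfiles layoutreturn body. A sketch of the XDR emitted,
 * as implied by the two helpers above:
 *
 *	opaque length	4 bytes, back-filled once the body is complete
 *	ioerr array	count word + the DS error records for the range
 *	iostats array	currently always a zero count
 *
 * The length word is patched at the end: (xdr->p - start - 1) 32-bit
 * words were written after the length slot, hence the "* 4" to bytes.
 */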
static void
ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	ff_layout_encode_ioerr(flo, xdr, args);
	ff_layout_encode_iostats(flo, xdr, args);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

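/*
 * Encode a netaddr4 (RFC 5661): an opaque netid ("tcp" or "tcp6") followed
 * by an opaque universal address in which the last two dot-separated
 * components are the high and low octets of the port, e.g. "192.0.2.1.8.1"
 * for port 2049 (the address here is an illustrative example).
 */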
/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

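/* Encode an nfstime4: 64-bit seconds followed by a 32-bit nseconds field. */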
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

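/*
 * Encode one ff_io_latency4: five 64-bit counters followed by two nfstime4
 * values. The caller is expected to hold mirror->lock while the stats are
 * sampled, as ff_layout_encode_layoutstats() below does.
 */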
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

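/*
 * Encode the per-mirror layoutstats body. In order, as annotated below:
 * netaddr4 for the mirror's first DS address, the mirror's fh_versions[0],
 * the read and then write ff_io_latency4 blocks (under mirror->lock), the
 * time since mirror->start_time as an nfstime4, and a boolean hard-coded
 * to false. The whole body is length-prefixed via the back-filled `start'.
 */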
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr,
			     struct nfs42_layoutstat_args *args,
			     struct nfs42_layoutstat_devinfo *devinfo)
{
	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p, *start;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

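/*
 * Fill args->devinfo[] from the layout's mirror list, at most dev_limit
 * entries. Runs under the inode's i_lock (taken by the caller below); each
 * mirror reported gains a reference that ff_layout_cleanup_layoutstats()
 * drops again once the stats have been sent.
 */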
static int
ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
			       struct pnfs_layout_hdr *lo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	struct nfs42_layoutstat_devinfo *devinfo;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (!mirror->mirror_ds)
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		devinfo = &args->devinfo[i];
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
		devinfo->layout_private = mirror;

		i++;
	}
	return i;
}

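/*
 * Count the active mirrors under i_lock, allocate the devinfo array with
 * the lock dropped (hence GFP_NOIO), then retake the lock to fill it in.
 * The count is only a sizing hint: ff_layout_mirror_prepare_stats() walks
 * the list again and never writes more than dev_count entries.
 */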
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	struct nfs4_ff_layout_mirror *mirror;
	int dev_count = 0;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (atomic_read(&mirror->ref) != 0)
			dev_count++;
	}
	spin_unlock(&args->inode->i_lock);
	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
	}
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	args->num_dev = ff_layout_mirror_prepare_stats(args,
			&ff_layout->generic_hdr, dev_count);
	spin_unlock(&args->inode->i_lock);

	return 0;
}

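/* Drop the per-mirror references taken in ff_layout_mirror_prepare_stats(). */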
static void
ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
{
	struct nfs4_ff_layout_mirror *mirror;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		mirror = data->args.devinfo[i].layout_private;
		data->args.devinfo[i].layout_private = NULL;
		ff_layout_put_mirror(mirror);
	}
}

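/*
 * Operations table registered with the generic pNFS core. Commit list
 * bookkeeping is delegated to the pnfs_generic_* helpers; I/O dispatch,
 * device handling and layoutstats use the ff_layout_* functions above.
 */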
static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
	.encode_layoutreturn    = ff_layout_encode_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);