/*
 * Device operations for the pNFS NFSv4 flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;

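/* Drop a reference to the mirror's deviceid node, if one is set */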
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	if (mirror_ds)
		nfs4_put_deviceid_node(&mirror_ds->id_node);
}

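/* Final teardown, called once the last deviceid reference is dropped:
 * log the deviceid, release the data server and free the structure
 * after any RCU readers are done with it.
 */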
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
	nfs4_pnfs_ds_put(mirror_ds->ds);
	kfree(mirror_ds->ds_versions);
	kfree_rcu(mirror_ds, id_node.rcu);
}

/* Decode opaque device data and construct new_ds using it */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
			    gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_ff_layout_ds *new_ds = NULL;
	struct nfs4_ff_ds_version *ds_versions = NULL;
	u32 mp_count;
	u32 version_count;
	__be32 *p;
	int i, ret = -ENOMEM;

	/* set up xdr stream */
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto out_err;

	new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
	if (!new_ds)
		goto out_scratch;

	nfs4_init_deviceid_node(&new_ds->id_node,
				server,
				&pdev->dev_id);
	INIT_LIST_HEAD(&dsaddrs);

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

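	/*
	 * For reference, the opaque body decoded below should correspond
	 * to the flex files ff_device_addr4 type (per the flex files
	 * layout spec, draft-ietf-nfsv4-flex-files); a rough sketch of
	 * the XDR, with field names as given in the spec:
	 *
	 *	struct ff_device_versions4 {
	 *		uint32_t	ffdv_version;
	 *		uint32_t	ffdv_minorversion;
	 *		uint32_t	ffdv_rsize;
	 *		uint32_t	ffdv_wsize;
	 *		bool		ffdv_tightly_coupled;
	 *	};
	 *	struct ff_device_addr4 {
	 *		multipath_list4		ffda_netaddrs;
	 *		ff_device_versions4	ffda_versions<>;
	 *	};
	 */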
	/* multipath count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	mp_count = be32_to_cpup(p);
	dprintk("%s: multipath ds count %d\n", __func__, mp_count);

	for (i = 0; i < mp_count; i++) {
		/* multipath ds */
		da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
					    &stream, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}
	if (list_empty(&dsaddrs)) {
		dprintk("%s: no suitable DS addresses found\n",
			__func__);
		ret = -ENOMEDIUM;
		goto out_err_drain_dsaddrs;
	}

	/* version count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	version_count = be32_to_cpup(p);
	dprintk("%s: version count %d\n", __func__, version_count);

	/* version_count comes off the wire, so use kcalloc() to guard
	 * against multiplication overflow; on failure the dsaddrs list
	 * must be drained, not just the scratch page freed.
	 */
	ds_versions = kcalloc(version_count,
			      sizeof(struct nfs4_ff_ds_version),
			      gfp_flags);
	if (!ds_versions)
		goto out_err_drain_dsaddrs;

	for (i = 0; i < version_count; i++) {
		/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
		 * tightly_coupled(4) */
		p = xdr_inline_decode(&stream, 20);
		if (unlikely(!p))
			goto out_err_drain_dsaddrs;
		ds_versions[i].version = be32_to_cpup(p++);
		ds_versions[i].minor_version = be32_to_cpup(p++);
		ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].tightly_coupled = be32_to_cpup(p);

		if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
		if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

		if (ds_versions[i].version != 3 || ds_versions[i].minor_version != 0) {
			dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
				i, ds_versions[i].version,
				ds_versions[i].minor_version);
			ret = -EPROTONOSUPPORT;
			goto out_err_drain_dsaddrs;
		}

		dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
			__func__, i, ds_versions[i].version,
			ds_versions[i].minor_version,
			ds_versions[i].rsize,
			ds_versions[i].wsize,
			ds_versions[i].tightly_coupled);
	}

	new_ds->ds_versions = ds_versions;
	new_ds->ds_versions_cnt = version_count;

	new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
	if (!new_ds->ds)
		goto out_err_drain_dsaddrs;

	/* If DS was already in cache, free ds addrs */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	__free_page(scratch);
	return new_ds;

out_err_drain_dsaddrs:
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds_versions);
out_scratch:
	__free_page(scratch);
out_err:
	kfree(new_ds);

	dprintk("%s ERROR: returning %d\n", __func__, ret);
	return NULL;
}

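/* Mark the deviceid unavailable; if that leaves the segment without any
 * usable data server, request that the layout be returned.
 */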
static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
		struct nfs4_deviceid_node *devid)
{
	nfs4_mark_deviceid_unavailable(devid);
	if (!ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
				lseg);
}

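/* Validate that a mirror has a usable data server, scheduling a layout
 * return (and marking the deviceid invalid where appropriate) if not.
 */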
static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
				   struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror == NULL || mirror->mirror_ds == NULL) {
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
					lseg);
		return false;
	}
	if (mirror->mirror_ds->ds == NULL) {
		struct nfs4_deviceid_node *devid;
		devid = &mirror->mirror_ds->id_node;
		ff_layout_mark_devid_invalid(lseg, devid);
		return false;
	}
	return true;
}

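/* Return start + len, saturating to NFS4_MAX_UINT64 on overflow,
 * i.e. a length that extends to the end of the file.
 */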
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

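/* Grow an existing error record to cover the union of its current
 * range and the new [offset, offset + length) range.
 */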
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
			    u64 offset, u64 length)
{
	u64 end;

	end = max_t(u64, end_offset(err->offset, err->length),
		    end_offset(offset, length));
	err->offset = min_t(u64, err->offset, offset);
	err->length = end - err->offset;
}

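/* Two error records may merge when they carry the same status, operation,
 * stateid and deviceid, and their byte ranges overlap or touch.
 */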
static bool ds_error_can_merge(struct nfs4_ff_layout_ds_err *err, u64 offset,
			       u64 length, int status, enum nfs_opnum4 opnum,
			       nfs4_stateid *stateid,
			       struct nfs4_deviceid *deviceid)
{
	return err->status == status && err->opnum == opnum &&
	       nfs4_stateid_match(&err->stateid, stateid) &&
	       !memcmp(&err->deviceid, deviceid, sizeof(*deviceid)) &&
	       end_offset(err->offset, err->length) >= offset &&
	       err->offset <= end_offset(offset, length);
}

static bool merge_ds_error(struct nfs4_ff_layout_ds_err *old,
			   struct nfs4_ff_layout_ds_err *new)
{
	if (!ds_error_can_merge(old, new->offset, new->length, new->status,
				new->opnum, &new->stateid, &new->deviceid))
		return false;

	extend_ds_error(old, new->offset, new->length);
	return true;
}

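/* Merge dserr into an existing record if possible, otherwise queue it on
 * the error list. Returns true when it merged, in which case the caller
 * is responsible for freeing dserr.
 */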
static bool
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
			      struct nfs4_ff_layout_ds_err *dserr)
{
	struct nfs4_ff_layout_ds_err *err;

	list_for_each_entry(err, &flo->error_list, list) {
		if (merge_ds_error(err, dserr))
			return true;
	}

	list_add(&dserr->list, &flo->error_list);
	return false;
}

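/* Try to extend an already-tracked error in place; returns true if a
 * mergeable record was found, so no new allocation is needed.
 */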
static bool
ff_layout_update_ds_error(struct nfs4_flexfile_layout *flo, u64 offset,
			  u64 length, int status, enum nfs_opnum4 opnum,
			  nfs4_stateid *stateid, struct nfs4_deviceid *deviceid)
{
	bool found = false;
	struct nfs4_ff_layout_ds_err *err;

	list_for_each_entry(err, &flo->error_list, list) {
		if (ds_error_can_merge(err, offset, length, status, opnum,
				       stateid, deviceid)) {
			found = true;
			extend_ds_error(err, offset, length);
			break;
		}
	}

	return found;
}

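/* Record a data-server I/O error so that it can be reported back to the
 * MDS in a later LAYOUTRETURN. Errors matching an existing record are
 * merged into it; otherwise a new record is queued on the layout's
 * error list.
 */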
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
			     struct nfs4_ff_layout_mirror *mirror, u64 offset,
			     u64 length, int status, enum nfs_opnum4 opnum,
			     gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds_err *dserr;
	bool needfree;

	if (status == 0)
		return 0;

	if (mirror->mirror_ds == NULL)
		return -EINVAL;

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	if (ff_layout_update_ds_error(flo, offset, length, status, opnum,
				      &mirror->stateid,
				      &mirror->mirror_ds->id_node.deviceid)) {
		spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
		return 0;
	}
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
	dserr = kmalloc(sizeof(*dserr), gfp_flags);
	if (!dserr)
		return -ENOMEM;

	INIT_LIST_HEAD(&dserr->list);
	dserr->offset = offset;
	dserr->length = length;
	dserr->status = status;
	dserr->opnum = opnum;
	nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
	memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
	       NFS4_DEVICEID4_SIZE);

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	needfree = ff_layout_add_ds_error_locked(flo, dserr);
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
	if (needfree)
		kfree(dserr);

	return 0;
}

/* currently we only support AUTH_NONE and AUTH_SYS */
static rpc_authflavor_t
nfs4_ff_layout_choose_authflavor(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror->uid == (u32)-1)
		return RPC_AUTH_NULL;
	return RPC_AUTH_UNIX;
}

/* fetch cred for NFSv3 DS */
static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
					struct nfs4_pnfs_ds *ds)
{
	if (ds->ds_clp && !mirror->cred &&
	    mirror->mirror_ds->ds_versions[0].version == 3) {
		struct rpc_auth *auth = ds->ds_clp->cl_rpcclient->cl_auth;
		struct rpc_cred *cred;
		struct auth_cred acred = {
			.uid = make_kuid(&init_user_ns, mirror->uid),
			.gid = make_kgid(&init_user_ns, mirror->gid),
		};

		/* AUTH_NULL ignores acred */
		cred = auth->au_ops->lookup_cred(auth, &acred, 0);
		if (IS_ERR(cred)) {
			dprintk("%s: lookup_cred failed with %ld\n",
				__func__, PTR_ERR(cred));
			return PTR_ERR(cred);
		}
		/* If another thread won the race to set mirror->cred,
		 * drop our extra reference.
		 */
		if (cmpxchg(&mirror->cred, NULL, cred))
			put_rpccred(cred);
	}
	return 0;
}

struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
	struct nfs_fh *fh = NULL;

	if (!ff_layout_mirror_valid(lseg, mirror)) {
		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
			__func__, mirror_idx);
		goto out;
	}

	/* FIXME: For now assume there is only 1 version available for the DS */
	fh = &mirror->fh_versions[0];
out:
	return fh;
}

/* Upon return, either ds is connected, or ds is NULL */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
			  bool fail_return)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct nfs4_pnfs_ds *ds = NULL;
	struct nfs4_deviceid_node *devid;
	struct inode *ino = lseg->pls_layout->plh_inode;
	struct nfs_server *s = NFS_SERVER(ino);
	unsigned int max_payload;
	rpc_authflavor_t flavor;

	if (!ff_layout_mirror_valid(lseg, mirror)) {
		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
			__func__, ds_idx);
		goto out;
	}

	devid = &mirror->mirror_ds->id_node;
	if (ff_layout_test_devid_unavailable(devid))
		goto out;

	ds = mirror->mirror_ds->ds;
	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
	smp_rmb();
	if (ds->ds_clp)
		goto out_update_creds;

	flavor = nfs4_ff_layout_choose_authflavor(mirror);

	/* FIXME: For now we assume the server sent only one version of NFS
	 * to use for the DS.
	 */
	nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
			     dataserver_retrans,
			     mirror->mirror_ds->ds_versions[0].version,
			     mirror->mirror_ds->ds_versions[0].minor_version,
			     flavor);

	/* connect success, check rsize/wsize limit */
	if (ds->ds_clp) {
		max_payload =
			nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
				       NULL);
		if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
			mirror->mirror_ds->ds_versions[0].rsize = max_payload;
		if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
			mirror->mirror_ds->ds_versions[0].wsize = max_payload;
	} else {
		/* Connection failed: record the error for LAYOUTRETURN and
		 * decide whether to return the layout now or defer it until
		 * close, depending on fail_return and on whether any other
		 * mirror is still usable.
		 */
		ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
					 mirror, lseg->pls_range.offset,
					 lseg->pls_range.length, NFS4ERR_NXIO,
					 OP_ILLEGAL, GFP_NOIO);
		if (fail_return) {
			pnfs_error_mark_layout_for_return(ino, lseg);
			if (ff_layout_has_available_ds(lseg))
				pnfs_set_retry_layoutget(lseg->pls_layout);
			else
				pnfs_clear_retry_layoutget(lseg->pls_layout);
		} else {
			if (ff_layout_has_available_ds(lseg))
				set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
					&lseg->pls_layout->plh_flags);
			else {
				pnfs_error_mark_layout_for_return(ino, lseg);
				pnfs_clear_retry_layoutget(lseg->pls_layout);
			}
		}
	}
out_update_creds:
	if (ff_layout_update_mirror_cred(mirror, ds))
		ds = NULL;
out:
	return ds;
}

struct rpc_cred *
ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
		      struct rpc_cred *mdscred)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct rpc_cred *cred = ERR_PTR(-EINVAL);

	if (!nfs4_ff_layout_prepare_ds(lseg, ds_idx, true))
		goto out;

	if (mirror && mirror->cred)
		cred = mirror->cred;
	else
		cred = mdscred;
out:
	return cred;
}

/**
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
				 struct nfs_client *ds_clp, struct inode *inode)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);

	switch (mirror->mirror_ds->ds_versions[0].version) {
	case 3:
		/* For NFSv3 DS, flavor is set when creating DS connections */
		return ds_clp->cl_rpcclient;
	case 4:
		return nfs4_find_or_create_ds_client(ds_clp, inode);
	default:
		BUG();
	}
}

/* Two ranges intersect when each one starts before the other ends; an
 * end that saturated to NFS4_MAX_UINT64 extends to EOF and always passes
 * its side of the test.
 */
static bool is_range_intersecting(u64 offset1, u64 length1,
				  u64 offset2, u64 length2)
{
	u64 end1 = end_offset(offset1, length1);
	u64 end2 = end_offset(offset2, length2);

	return (end1 == NFS4_MAX_UINT64 || end1 > offset2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > offset1);
}

/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
			      struct xdr_stream *xdr, int *count,
			      const struct pnfs_layout_range *range)
{
	struct nfs4_ff_layout_ds_err *err, *n;
	__be32 *p;

	list_for_each_entry_safe(err, n, &flo->error_list, list) {
		if (!is_range_intersecting(err->offset, err->length,
					   range->offset, range->length))
			continue;
		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
		 * + array length(4) + deviceid(NFS4_DEVICEID4_SIZE)
		 * + status(4) + opnum(4)
		 */
		p = xdr_reserve_space(xdr,
				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
		if (unlikely(!p))
			return -ENOBUFS;
		p = xdr_encode_hyper(p, err->offset);
		p = xdr_encode_hyper(p, err->length);
		p = xdr_encode_opaque_fixed(p, &err->stateid,
					    NFS4_STATEID_SIZE);
		/* Encode 1 error */
		*p++ = cpu_to_be32(1);
		p = xdr_encode_opaque_fixed(p, &err->deviceid,
					    NFS4_DEVICEID4_SIZE);
		*p++ = cpu_to_be32(err->status);
		*p++ = cpu_to_be32(err->opnum);
		*count += 1;
		list_del(&err->list);
		dprintk("%s: offset %llu length %llu status %d op %d count %d\n",
			__func__, err->offset, err->length, err->status,
			err->opnum, *count);
		kfree(err);
	}

	return 0;
}

static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (mirror && mirror->mirror_ds) {
			devid = &mirror->mirror_ds->id_node;
			if (!ff_layout_test_devid_unavailable(devid))
				return true;
		}
	}

	return false;
}

static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror || !mirror->mirror_ds)
			return false;
		devid = &mirror->mirror_ds->id_node;
		if (ff_layout_test_devid_unavailable(devid))
			return false;
	}

	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_READ)
		return ff_read_layout_has_available_ds(lseg);
	/* Note: RW layout needs all mirrors available */
	return ff_rw_layout_has_available_ds(lseg);
}

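/* Module parameters; with 0644 permissions these should also be tunable
 * at runtime via /sys/module. The timeout follows the usual sunrpc
 * convention of tenths of a second.
 */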
module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
			"retries a request before it attempts further "
			"recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
			"NFSv4.1 client waits for a response from a "
			"data server before it retries an NFS request.");