/*
 * linux/fs/nfs/blocklayout/blocklayout.c
 *
 * Module for the NFSv4.1 pNFS block layout driver.
 *
 * Copyright (c) 2006 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@citi.umich.edu>
 * Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

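/*
 * An extent covers a hole if it is explicitly NONE_DATA, or if it is
 * INVALID_DATA that has not yet been written (no tag set on it).
 */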
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

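/*
 * Submit the current bio, if any, taking a reference on the parallel
 * I/O tracker for its completion.  Always returns NULL so the caller
 * can reset its bio pointer in one step.
 */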
static struct bio *
bl_submit_bio(struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			bio_op(bio) == READ ? "read" : "write",
			bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}

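/*
 * Allocate a bio for up to npg pages.  If we are in memory reclaim
 * (PF_MEMALLOC), keep retrying with half as many pages rather than
 * failing outright.
 */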
118 static struct bio *
bl_alloc_init_bio(int npg,struct block_device * bdev,sector_t disk_sector,bio_end_io_t end_io,struct parallel_io * par)119 bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
120 bio_end_io_t end_io, struct parallel_io *par)
121 {
122 struct bio *bio;
123
124 npg = min(npg, BIO_MAX_PAGES);
125 bio = bio_alloc(GFP_NOIO, npg);
126 if (!bio && (current->flags & PF_MEMALLOC)) {
127 while (!bio && (npg /= 2))
128 bio = bio_alloc(GFP_NOIO, npg);
129 }
130
131 if (bio) {
132 bio->bi_iter.bi_sector = disk_sector;
133 bio->bi_bdev = bdev;
134 bio->bi_end_io = end_io;
135 bio->bi_private = par;
136 }
137 return bio;
138 }
139
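/*
 * Translate the file-relative sector isect through the extent and the
 * device mapping to a physical disk address, clamp *len to what the
 * mapping allows, and add the page to the bio, submitting the old bio
 * and allocating a fresh one whenever the mapping or the bio runs out.
 */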
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
		bio_set_op_attrs(bio, rw, 0);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(bio);
		goto retry;
	}
	return bio;
}

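/* Read-side bio completion: record the first pNFS error and drop our
 * references on the bio and the parallel I/O tracker.
 */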
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_error) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

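/* Deferred read completion, run from a workqueue so that
 * pnfs_ld_read_done() is not called in bio completion context.
 */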
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

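/*
 * Issue reads for the pages in @header, walking the layout extents as
 * we go and zero-filling any pages that fall into holes instead of
 * touching the device.
 */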
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_SIZE)
				pg_len = PAGE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

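/* Write-side bio completion: record the first pNFS error and drop our
 * references on the bio and the parallel I/O tracker.
 */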
static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_error) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
		u64 lwb = hdr->args.offset + hdr->args.count;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT, lwb);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

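/*
 * Issue writes for the pages in @header.  Writes always cover whole
 * pages, so the starting offset is rounded down to a page boundary.
 */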
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags, bool is_scsi_layout)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	bl->bl_scsi_layout = is_scsi_layout;
	return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

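/* Decode a 64-bit byte offset and convert it to a 512-byte sector
 * count, rejecting values that are not sector aligned.
 */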
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

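/*
 * Decode a single extent from the layout body (deviceid, file offset,
 * length, volume offset, state), look up its device, verify it against
 * the layout rules, and queue it on the staging list.
 */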
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

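/* Remove the extents covered by a returned range.  Ranges that are not
 * properly aligned are ignored.
 */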
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment, bool is_write)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (is_write &&
	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

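/* Route reads through the MDS unless the request is sector aligned. */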
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_SHIFT);
	else
		return (end - idx) << PAGE_SHIFT;
}

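/* Route writes through the MDS unless the request is page aligned, and
 * size the layout request to cover the expected extent of the write.
 */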
static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id = LAYOUT_BLOCK_VOLUME,
	.name = "LAYOUT_BLOCK_VOLUME",
	.owner = THIS_MODULE,
	.flags = PNFS_LAYOUTRET_ON_SETATTR |
		 PNFS_READ_WHOLE_PAGE,
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = bl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.return_range = bl_return_range,
	.prepare_layoutcommit = bl_prepare_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.alloc_deviceid_node = bl_alloc_deviceid_node,
	.free_deviceid_node = bl_free_deviceid_node,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
	.sync = pnfs_generic_sync,
};

static struct pnfs_layoutdriver_type scsilayout_type = {
	.id = LAYOUT_SCSI,
	.name = "LAYOUT_SCSI",
	.owner = THIS_MODULE,
	.flags = PNFS_LAYOUTRET_ON_SETATTR |
		 PNFS_READ_WHOLE_PAGE,
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = sl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.return_range = bl_return_range,
	.prepare_layoutcommit = bl_prepare_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.alloc_deviceid_node = bl_alloc_deviceid_node,
	.free_deviceid_node = bl_free_deviceid_node,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
	.sync = pnfs_generic_sync,
};


static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = bl_init_pipefs();
	if (ret)
		goto out;

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out_cleanup_pipe;

	ret = pnfs_register_layoutdriver(&scsilayout_type);
	if (ret)
		goto out_unregister_block;
	return 0;

out_unregister_block:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out_cleanup_pipe:
	bl_cleanup_pipefs();
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	pnfs_unregister_layoutdriver(&scsilayout_type);
	pnfs_unregister_layoutdriver(&blocklayout_type);
	bl_cleanup_pipefs();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);