Lines matching: bl, data, offset (fs/nfs/blocklayout/blocklayout.c)
54 switch (be->be_state) { in is_hole()
58 return be->be_tag ? false : true; in is_hole()
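The matcher surfaces only two lines of is_hole(); for orientation, here is a reconstruction of the elided switch (a sketch assembled around the visible fragments, not verbatim source): PNFS_BLOCK_NONE_DATA is always a hole, and PNFS_BLOCK_INVALID_DATA counts as one only until it has been tagged as written.

static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;		/* an explicit hole */
	case PNFS_BLOCK_INVALID_DATA:
		/* invalid data is a hole until tagged as written */
		return be->be_tag ? false : true;
	default:
		return false;		/* READ_DATA / READWRITE_DATA */
	}
}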
64 /* The data we are handed might be spread across several bios. We need to track each bio as it ends and call our "final" functions when the data has all arrived. */
69 void (*pnfs_callback) (void *data);
70 void *data; member
73 static inline struct parallel_io *alloc_parallel(void *data) in alloc_parallel() argument
79 rv->data = data; in alloc_parallel()
80 kref_init(&rv->refcnt); in alloc_parallel()
87 kref_get(&p->refcnt); in get_parallel()
95 p->pnfs_callback(p->data); in destroy_parallel()
101 kref_put(&p->refcnt, destroy_parallel); in put_parallel()
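The four fragments above (alloc_parallel, get_parallel, destroy_parallel, put_parallel) implement a common fan-out completion pattern: the submitter holds one reference, every in-flight bio takes another, and the "final" callback fires when the last reference drops. A minimal userspace model of the same pattern, using C11 atomics in place of kref (all names hypothetical):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct par_model {
	atomic_int refcnt;
	void (*final_cb)(void *data);	/* runs once, after the last put */
	void *data;
};

static struct par_model *alloc_par(void *data, void (*cb)(void *))
{
	struct par_model *p = malloc(sizeof(*p));
	atomic_init(&p->refcnt, 1);	/* the submitter's reference */
	p->final_cb = cb;
	p->data = data;
	return p;
}

static void get_par(struct par_model *p)
{
	atomic_fetch_add(&p->refcnt, 1);	/* one per submitted bio */
}

static void put_par(struct par_model *p)
{
	if (atomic_fetch_sub(&p->refcnt, 1) == 1) {
		p->final_cb(p->data);	/* every sub-I/O has completed */
		free(p);
	}
}

static void done(void *data) { printf("all done: %s\n", (char *)data); }

int main(void)
{
	struct par_model *p = alloc_par("read #1", done);
	get_par(p); get_par(p);		/* two bios submitted */
	put_par(p); put_par(p);		/* their end_io handlers fire */
	put_par(p);			/* submitter drops the initial ref */
	return 0;
}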
108 get_parallel(bio->bi_private); in bl_submit_bio()
111 bio->bi_iter.bi_size, in bl_submit_bio()
112 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio()
126 if (!bio && (current->flags & PF_MEMALLOC)) { in bl_alloc_init_bio()
132 bio->bi_iter.bi_sector = disk_sector; in bl_alloc_init_bio()
134 bio->bi_end_io = end_io; in bl_alloc_init_bio()
135 bio->bi_private = par; in bl_alloc_init_bio()
140 static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map) in offset_in_map() argument
142 return offset >= map->start && offset < map->start + map->len; in offset_in_map()
149 struct parallel_io *par, unsigned int offset, int *len) in do_add_page_to_bio() argument
152 container_of(be->be_device, struct pnfs_block_dev, node); in do_add_page_to_bio()
155 dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__, in do_add_page_to_bio()
156 npg, rw, (unsigned long long)isect, offset, *len); in do_add_page_to_bio()
158 /* translate to device offset */ in do_add_page_to_bio()
159 isect += be->be_v_offset; in do_add_page_to_bio()
160 isect -= be->be_f_offset; in do_add_page_to_bio()
162 /* translate to physical disk offset */ in do_add_page_to_bio()
165 if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map)) in do_add_page_to_bio()
166 return ERR_PTR(-EIO); in do_add_page_to_bio()
169 disk_addr += map->disk_offset; in do_add_page_to_bio()
170 disk_addr -= map->start; in do_add_page_to_bio()
174 if (end >= map->start + map->len) in do_add_page_to_bio()
175 *len = map->start + map->len - disk_addr; in do_add_page_to_bio()
179 bio = bl_alloc_init_bio(npg, map->bdev, in do_add_page_to_bio()
182 return ERR_PTR(-ENOMEM); in do_add_page_to_bio()
185 if (bio_add_page(bio, page, *len, offset) < *len) { in do_add_page_to_bio()
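do_add_page_to_bio() translates an address twice: the extent maps a file-relative sector onto the logical volume (add be_v_offset, subtract be_f_offset), and the device map then rebases that onto a physical block device, clamping the I/O length at the map boundary. A standalone sketch of the arithmetic with illustrative numbers; for clarity the clamp here is applied before the final rebase, which simplifies the ordering shown in the fragments above:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint64_t isect = 2048;		/* file-relative sector (byte 1 MiB) */
	uint64_t be_f_offset = 1024;	/* extent starts at file sector 1024 */
	uint64_t be_v_offset = 8192;	/* ...and at volume sector 8192 */
	int len = 4096;			/* bytes to add to the bio */

	/* 1. extent translation: file sector -> volume sector */
	isect += be_v_offset;
	isect -= be_f_offset;		/* volume sector 9216 */

	/* 2. device-map translation: volume byte -> physical byte */
	uint64_t disk_addr = isect << SECTOR_SHIFT;		/* 4718592 */
	uint64_t map_start = 4194304, map_len = 1048576, map_disk_offset = 0;
	if (disk_addr < map_start || disk_addr >= map_start + map_len) {
		fprintf(stderr, "-EIO: address outside device map\n");
		return 1;
	}
	if (disk_addr + len > map_start + map_len)	/* clamp at map end */
		len = (int)(map_start + map_len - disk_addr);
	disk_addr += map_disk_offset;
	disk_addr -= map_start;		/* rebase onto the block device */

	printf("physical byte %llu, len %d\n",
	       (unsigned long long)disk_addr, len);	/* 524288, 4096 */
	return 0;
}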
194 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); in bl_mark_devices_unavailable() local
195 size_t bytes_left = header->args.count; in bl_mark_devices_unavailable()
199 isect = header->args.offset >> SECTOR_SHIFT; in bl_mark_devices_unavailable()
200 bytes_left += header->args.offset - (isect << SECTOR_SHIFT); in bl_mark_devices_unavailable()
203 if (!ext_tree_lookup(bl, isect, &be, rw)) in bl_mark_devices_unavailable()
205 extent_length = be.be_length - (isect - be.be_f_offset); in bl_mark_devices_unavailable()
209 bytes_left -= extent_length << SECTOR_SHIFT; in bl_mark_devices_unavailable()
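The bytes_left adjustment above widens the byte count by exactly the amount the starting offset was rounded down to a sector boundary, so the extent walk still covers every byte of the request. A worked example:

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned long long offset = 4617, count = 1000;	/* illustrative */
	unsigned long long isect = offset >> SECTOR_SHIFT;	/* sector 9 (byte 4608) */
	unsigned long long bytes_left = count + (offset - (isect << SECTOR_SHIFT));

	/* prints: walk from sector 9 covering 1009 bytes */
	printf("walk from sector %llu covering %llu bytes\n", isect, bytes_left);
	return 0;
}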
217 struct parallel_io *par = bio->bi_private; in bl_end_io_read()
219 if (bio->bi_status) { in bl_end_io_read()
220 struct nfs_pgio_header *header = par->data; in bl_end_io_read()
222 if (!header->pnfs_error) in bl_end_io_read()
223 header->pnfs_error = -EIO; in bl_end_io_read()
224 pnfs_set_lo_fail(header->lseg); in bl_end_io_read()
243 bl_end_par_io_read(void *data) in bl_end_par_io_read() argument
245 struct nfs_pgio_header *hdr = data; in bl_end_par_io_read()
247 hdr->task.tk_status = hdr->pnfs_error; in bl_end_par_io_read()
248 INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup); in bl_end_par_io_read()
249 schedule_work(&hdr->task.u.tk_work); in bl_end_par_io_read()
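bl_end_par_io_read() can run from bio completion context, so it only records the status and defers the heavyweight cleanup to a workqueue through the work_struct embedded in the rpc task. The queued handler is elided by the matcher; a hedged reconstruction of what bl_read_cleanup() looks like:

static void bl_read_cleanup(struct work_struct *work)
{
	/* recover the header from the embedded work_struct */
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	pnfs_ld_read_done(hdr);	/* complete the read in process context */
}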
255 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); in bl_read_pagelist() local
261 loff_t f_offset = header->args.offset; in bl_read_pagelist()
262 size_t bytes_left = header->args.count; in bl_read_pagelist()
263 unsigned int pg_offset = header->args.pgbase, pg_len; in bl_read_pagelist()
264 struct page **pages = header->args.pages; in bl_read_pagelist()
265 int pg_index = header->args.pgbase >> PAGE_SHIFT; in bl_read_pagelist()
266 const bool is_dio = (header->dreq != NULL); in bl_read_pagelist()
270 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, in bl_read_pagelist()
271 header->page_array.npages, f_offset, in bl_read_pagelist()
272 (unsigned int)header->args.count); in bl_read_pagelist()
277 par->pnfs_callback = bl_end_par_io_read; in bl_read_pagelist()
282 /* Code assumes extents are page-aligned */ in bl_read_pagelist()
283 for (i = pg_index; i < header->page_array.npages; i++) { in bl_read_pagelist()
289 if (!ext_tree_lookup(bl, isect, &be, false)) { in bl_read_pagelist()
290 header->pnfs_error = -EIO; in bl_read_pagelist()
293 extent_length = be.be_length - (isect - be.be_f_offset); in bl_read_pagelist()
298 pg_len = PAGE_SIZE - pg_offset; in bl_read_pagelist()
316 header->page_array.npages - i, in bl_read_pagelist()
322 header->pnfs_error = PTR_ERR(bio); in bl_read_pagelist()
328 extent_length -= (pg_len >> SECTOR_SHIFT); in bl_read_pagelist()
330 bytes_left -= pg_len; in bl_read_pagelist()
333 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { in bl_read_pagelist()
334 header->res.eof = 1; in bl_read_pagelist()
335 header->res.count = header->inode->i_size - header->args.offset; in bl_read_pagelist()
337 header->res.count = (isect << SECTOR_SHIFT) - header->args.offset; in bl_read_pagelist()
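End-of-read accounting: if the sector cursor ran to or past i_size, the reply is marked EOF and the byte count is clamped to the file size; otherwise the count is whatever the cursor actually covered. A worked example with illustrative numbers:

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	/* 10000-byte file, 4096-byte read at offset 8192 */
	unsigned long long i_size = 10000, offset = 8192;
	unsigned long long isect = (8192 + 4096) >> SECTOR_SHIFT;	/* cursor after the loop */

	if ((isect << SECTOR_SHIFT) >= i_size)
		printf("eof=1 count=%llu\n", i_size - offset);	/* count=1808 */
	else
		printf("eof=0 count=%llu\n", (isect << SECTOR_SHIFT) - offset);
	return 0;
}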
348 struct parallel_io *par = bio->bi_private; in bl_end_io_write()
349 struct nfs_pgio_header *header = par->data; in bl_end_io_write()
351 if (bio->bi_status) { in bl_end_io_write()
352 if (!header->pnfs_error) in bl_end_io_write()
353 header->pnfs_error = -EIO; in bl_end_io_write()
354 pnfs_set_lo_fail(header->lseg); in bl_end_io_write()
372 if (likely(!hdr->pnfs_error)) { in bl_write_cleanup()
373 struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); in bl_write_cleanup() local
374 u64 start = hdr->args.offset & (loff_t)PAGE_MASK; in bl_write_cleanup()
375 u64 end = (hdr->args.offset + hdr->args.count + in bl_write_cleanup()
376 PAGE_SIZE - 1) & (loff_t)PAGE_MASK; in bl_write_cleanup()
377 u64 lwb = hdr->args.offset + hdr->args.count; in bl_write_cleanup()
379 ext_tree_mark_written(bl, start >> SECTOR_SHIFT, in bl_write_cleanup()
380 (end - start) >> SECTOR_SHIFT, lwb); in bl_write_cleanup()
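bl_write_cleanup() rounds the written range outward to page boundaries before marking it in the extent tree, while lwb (the last written byte) stays exact for the later layoutcommit. Worked arithmetic:

#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long off = 5000, count = 100;	/* illustrative write */
	unsigned long long start = off & PAGE_MASK;			/* 4096 */
	unsigned long long end = (off + count + PAGE_SIZE - 1) & PAGE_MASK; /* 8192 */
	unsigned long long lwb = off + count;				/* 5100 */

	/* prints: mark sectors [8, +8) written, lwb=5100 */
	printf("mark sectors [%llu, +%llu) written, lwb=%llu\n",
	       start >> SECTOR_SHIFT, (end - start) >> SECTOR_SHIFT, lwb);
	return 0;
}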
387 static void bl_end_par_io_write(void *data) in bl_end_par_io_write() argument
389 struct nfs_pgio_header *hdr = data; in bl_end_par_io_write()
391 hdr->task.tk_status = hdr->pnfs_error; in bl_end_par_io_write()
392 hdr->verf.committed = NFS_FILE_SYNC; in bl_end_par_io_write()
393 INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup); in bl_end_par_io_write()
394 schedule_work(&hdr->task.u.tk_work); in bl_end_par_io_write()
400 struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); in bl_write_pagelist() local
406 loff_t offset = header->args.offset; in bl_write_pagelist() local
407 size_t count = header->args.count; in bl_write_pagelist()
408 struct page **pages = header->args.pages; in bl_write_pagelist()
409 int pg_index = header->args.pgbase >> PAGE_SHIFT; in bl_write_pagelist()
414 dprintk("%s enter, %zu@%lld\n", __func__, count, offset); in bl_write_pagelist()
416 /* At this point, header->page_array is a (sequential) list of nfs_pages. We want to write each, and if there is an error set pnfs_error to have it redone using nfs. */ in bl_write_pagelist()
423 par->pnfs_callback = bl_end_par_io_write; in bl_write_pagelist()
428 offset = offset & (loff_t)PAGE_MASK; in bl_write_pagelist()
429 isect = offset >> SECTOR_SHIFT; in bl_write_pagelist()
431 for (i = pg_index; i < header->page_array.npages; i++) { in bl_write_pagelist()
436 if (!ext_tree_lookup(bl, isect, &be, true)) { in bl_write_pagelist()
437 header->pnfs_error = -EINVAL; in bl_write_pagelist()
441 extent_length = be.be_length - (isect - be.be_f_offset); in bl_write_pagelist()
445 bio = do_add_page_to_bio(bio, header->page_array.npages - i, in bl_write_pagelist()
450 header->pnfs_error = PTR_ERR(bio); in bl_write_pagelist()
455 offset += pg_len; in bl_write_pagelist()
456 count -= pg_len; in bl_write_pagelist()
458 extent_length -= (pg_len >> SECTOR_SHIFT); in bl_write_pagelist()
461 header->res.count = header->args.count; in bl_write_pagelist()
471 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); in bl_free_layout_hdr() local
476 err = ext_tree_remove(bl, true, 0, LLONG_MAX); in bl_free_layout_hdr()
479 kfree_rcu(bl, bl_layout.plh_rcu); in bl_free_layout_hdr()
485 struct pnfs_block_layout *bl; in __bl_alloc_layout_hdr() local
488 bl = kzalloc(sizeof(*bl), gfp_flags); in __bl_alloc_layout_hdr()
489 if (!bl) in __bl_alloc_layout_hdr()
492 bl->bl_ext_rw = RB_ROOT; in __bl_alloc_layout_hdr()
493 bl->bl_ext_ro = RB_ROOT; in __bl_alloc_layout_hdr()
494 spin_lock_init(&bl->bl_ext_lock); in __bl_alloc_layout_hdr()
496 bl->bl_scsi_layout = is_scsi_layout; in __bl_alloc_layout_hdr()
497 return &bl->bl_layout; in __bl_alloc_layout_hdr()
521 u64 start; /* Expected start of next non-COW extent */
526 /* Verify the extent meets the layout requirements of the pnfs-block draft, section 2.3.1. */
532 if (lv->mode == IOMODE_READ) { in verify_extent()
533 if (be->be_state == PNFS_BLOCK_READWRITE_DATA || in verify_extent()
534 be->be_state == PNFS_BLOCK_INVALID_DATA) in verify_extent()
535 return -EIO; in verify_extent()
536 if (be->be_f_offset != lv->start) in verify_extent()
537 return -EIO; in verify_extent()
538 lv->start += be->be_length; in verify_extent()
541 /* lv->mode == IOMODE_RW */ in verify_extent()
542 if (be->be_state == PNFS_BLOCK_READWRITE_DATA) { in verify_extent()
543 if (be->be_f_offset != lv->start) in verify_extent()
544 return -EIO; in verify_extent()
545 if (lv->cowread > lv->start) in verify_extent()
546 return -EIO; in verify_extent()
547 lv->start += be->be_length; in verify_extent()
548 lv->inval = lv->start; in verify_extent()
550 } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) { in verify_extent()
551 if (be->be_f_offset != lv->start) in verify_extent()
552 return -EIO; in verify_extent()
553 lv->start += be->be_length; in verify_extent()
555 } else if (be->be_state == PNFS_BLOCK_READ_DATA) { in verify_extent()
556 if (be->be_f_offset > lv->start) in verify_extent()
557 return -EIO; in verify_extent()
558 if (be->be_f_offset < lv->inval) in verify_extent()
559 return -EIO; in verify_extent()
560 if (be->be_f_offset < lv->cowread) in verify_extent()
561 return -EIO; in verify_extent()
562 /* It looks like you might want to min this with lv->start, but you really don't. */ in verify_extent()
565 lv->inval = lv->inval + be->be_length; in verify_extent()
566 lv->cowread = be->be_f_offset + be->be_length; in verify_extent()
569 return -EIO; in verify_extent()
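verify_extent() enforces the ordering rules of the pnfs-block draft: writable (READWRITE/INVALID) extents must tile the layout contiguously from lv->start, and READ_DATA extents, which serve as copy-on-write read sources, may only cover ground already declared INVALID, tracked through lv->inval and lv->cowread. A simplified userspace model of the RW-mode checks (a sketch with -1 standing in for -EIO, not the kernel function):

#include <stdint.h>
#include <stdio.h>

enum { RW_DATA, INVALID_DATA, READ_DATA };

struct lv { uint64_t start, inval, cowread; };	/* all in sectors */

static int verify(struct lv *lv, int state, uint64_t f_offset, uint64_t len)
{
	switch (state) {
	case RW_DATA:			/* must continue the tiling exactly */
		if (f_offset != lv->start || lv->cowread > lv->start)
			return -1;
		lv->start += len;
		lv->inval = lv->start;
		return 0;
	case INVALID_DATA:		/* likewise must start at lv->start */
		if (f_offset != lv->start)
			return -1;
		lv->start += len;
		return 0;
	case READ_DATA:			/* COW source: only over INVALID ground */
		if (f_offset > lv->start || f_offset < lv->inval ||
		    f_offset < lv->cowread)
			return -1;
		lv->inval += len;
		lv->cowread = f_offset + len;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct lv lv = { 0, 0, 0 };

	printf("%d\n", verify(&lv, INVALID_DATA, 0, 8));	/* 0: tiles [0,8) */
	printf("%d\n", verify(&lv, READ_DATA, 0, 8));		/* 0: COW source for it */
	printf("%d\n", verify(&lv, RW_DATA, 16, 8));		/* -1: leaves a gap */
	return 0;
}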
579 return -1; in decode_sector_number()
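Only the failure return of decode_sector_number() matches here; given the 512-byte granularity noted below in bl_alloc_extent(), the elided body plausibly decodes a 64-bit byte value off the XDR stream and rejects anything that is not sector-aligned. A hedged reconstruction:

static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);	/* wire value is in bytes */
	if (s & 0x1ff) {			/* not a multiple of 512 */
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;		/* store as a sector count */
	return 0;
}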
596 return ERR_PTR(-ENODEV); in bl_find_get_deviceid()
598 if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0) in bl_find_get_deviceid()
602 start = end - PNFS_DEVICE_RETRY_TIMEOUT; in bl_find_get_deviceid()
603 if (!time_in_range(node->timestamp_unavailable, start, end)) { in bl_find_get_deviceid()
604 nfs4_delete_deviceid(node->ld, node->nfs_client, id); in bl_find_get_deviceid()
609 return ERR_PTR(-ENODEV); in bl_find_get_deviceid()
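A device id that was marked unavailable is not retried immediately: only once its failure timestamp has aged out of the PNFS_DEVICE_RETRY_TIMEOUT window is the stale node deleted, forcing a fresh GETDEVICEINFO on the next lookup. A userspace model of the window test, with 120 standing in for the kernel's 120*HZ jiffies and ignoring the wraparound that time_in_range() handles there:

#include <stdbool.h>
#include <stdio.h>

#define RETRY_TIMEOUT 120

static bool time_in_range(long t, long lo, long hi)
{
	return t >= lo && t <= hi;	/* simplified: no jiffies wraparound */
}

int main(void)
{
	long now = 1000, marked_unavailable_at = 850;
	long start = now - RETRY_TIMEOUT;	/* window is [880, 1000] */

	if (!time_in_range(marked_unavailable_at, start, now))
		printf("timeout expired: delete deviceid and refetch\n");
	else
		printf("still blacklisted: fail with -ENODEV\n");
	return 0;
}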
624 return -EIO; in bl_alloc_extent()
628 return -ENOMEM; in bl_alloc_extent()
633 be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id, in bl_alloc_extent()
634 lo->plh_lc_cred, gfp_mask); in bl_alloc_extent()
635 if (IS_ERR(be->be_device)) { in bl_alloc_extent()
636 error = PTR_ERR(be->be_device); in bl_alloc_extent()
642 * extent structure in 512-byte granularity. in bl_alloc_extent()
644 error = -EIO; in bl_alloc_extent()
645 if (decode_sector_number(&p, &be->be_f_offset) < 0) in bl_alloc_extent()
647 if (decode_sector_number(&p, &be->be_length) < 0) in bl_alloc_extent()
649 if (decode_sector_number(&p, &be->be_v_offset) < 0) in bl_alloc_extent()
651 be->be_state = be32_to_cpup(p++); in bl_alloc_extent()
659 list_add_tail(&be->be_list, extents); in bl_alloc_extent()
663 nfs4_put_deviceid_node(be->be_device); in bl_alloc_extent()
674 .mode = lgr->range.iomode, in bl_alloc_lseg()
675 .start = lgr->range.offset >> SECTOR_SHIFT, in bl_alloc_lseg()
676 .inval = lgr->range.offset >> SECTOR_SHIFT, in bl_alloc_lseg()
677 .cowread = lgr->range.offset >> SECTOR_SHIFT, in bl_alloc_lseg()
679 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); in bl_alloc_lseg() local
689 dprintk("---> %s\n", __func__); in bl_alloc_lseg()
693 return ERR_PTR(-ENOMEM); in bl_alloc_lseg()
695 status = -ENOMEM; in bl_alloc_lseg()
701 lgr->layoutp->pages, lgr->layoutp->len); in bl_alloc_lseg()
704 status = -EIO; in bl_alloc_lseg()
722 if (lgr->range.offset + lgr->range.length != in bl_alloc_lseg()
725 status = -EIO; in bl_alloc_lseg()
731 status = -EIO; in bl_alloc_lseg()
739 list_del(&be->be_list); in bl_alloc_lseg()
742 status = ext_tree_insert(bl, be); in bl_alloc_lseg()
745 nfs4_put_deviceid_node(be->be_device); in bl_alloc_lseg()
755 case -ENODEV: in bl_alloc_lseg()
757 set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); in bl_alloc_lseg()
771 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); in bl_return_range() local
772 sector_t offset = range->offset >> SECTOR_SHIFT, end; in bl_return_range() local
774 if (range->offset % 8) { in bl_return_range()
775 dprintk("%s: offset %lld not block size aligned\n", in bl_return_range()
776 __func__, range->offset); in bl_return_range()
780 if (range->length != NFS4_MAX_UINT64) { in bl_return_range()
781 if (range->length % 8) { in bl_return_range()
783 __func__, range->length); in bl_return_range()
787 end = offset + (range->length >> SECTOR_SHIFT); in bl_return_range()
792 ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end); in bl_return_range()
804 ext_tree_mark_committed(&lcdata->args, lcdata->res.status); in bl_cleanup_layoutcommit()
812 if (server->pnfs_blksize == 0) { in bl_set_layoutdriver()
814 return -EINVAL; in bl_set_layoutdriver()
816 if (server->pnfs_blksize > PAGE_SIZE) { in bl_set_layoutdriver()
818 __func__, server->pnfs_blksize); in bl_set_layoutdriver()
819 return -EINVAL; in bl_set_layoutdriver()
833 if (pgio->pg_dreq == NULL) in is_aligned_req()
836 if (!IS_ALIGNED(req->wb_offset, alignment)) in is_aligned_req()
839 if (IS_ALIGNED(req->wb_bytes, alignment)) in is_aligned_req()
843 (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) { in is_aligned_req()
846 * the full page. Data past the inode size is guaranteed to be zeroed. in is_aligned_req()
867 if (pgio->pg_lseg && in bl_pg_init_read()
868 test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) { in bl_pg_init_read()
869 pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg); in bl_pg_init_read()
870 pnfs_set_lo_fail(pgio->pg_lseg); in bl_pg_init_read()
877 /* Return 0 if @req cannot be coalesced into @pgio, otherwise return the number of bytes (maximum @req->wb_bytes) that can be coalesced. */
894 struct address_space *mapping = inode->i_mapping; in pnfs_num_cont_bytes()
899 if (end != inode->i_mapping->nrpages) { in pnfs_num_cont_bytes()
906 return i_size_read(inode) - (idx << PAGE_SHIFT); in pnfs_num_cont_bytes()
908 return (end - idx) << PAGE_SHIFT; in pnfs_num_cont_bytes()
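pnfs_num_cont_bytes() sizes the layout request for a buffered write: the answer is bounded either by the first gap in the page cache after idx or, when the search wraps to zero, by i_size itself (the common write-to-EOF case). A model of the two result branches with illustrative numbers:

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long long cont_bytes(unsigned long long i_size,
				     unsigned long idx, unsigned long end)
{
	if (!end)	/* no later cached page found: bound by file size */
		return i_size - ((unsigned long long)idx << PAGE_SHIFT);
	return (unsigned long long)(end - idx) << PAGE_SHIFT;
}

int main(void)
{
	printf("%llu\n", cont_bytes(1 << 20, 3, 8));	/* 5 pages: 20480 */
	printf("%llu\n", cont_bytes(20000, 2, 0));	/* to EOF: 11808 */
	return 0;
}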
921 if (pgio->pg_dreq == NULL) in bl_pg_init_write()
922 wb_size = pnfs_num_cont_bytes(pgio->pg_inode, in bl_pg_init_write()
923 req->wb_index); in bl_pg_init_write()
925 wb_size = nfs_dreq_bytes_left(pgio->pg_dreq); in bl_pg_init_write()
929 if (pgio->pg_lseg && in bl_pg_init_write()
930 test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) { in bl_pg_init_write()
932 pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg); in bl_pg_init_write()
933 pnfs_set_lo_fail(pgio->pg_lseg); in bl_pg_init_write()
940 /* Return 0 if @req cannot be coalesced into @pgio, otherwise return the number of bytes (maximum @req->wb_bytes) that can be coalesced. */
1051 MODULE_ALIAS("nfs-layouttype4-3");
1052 MODULE_ALIAS("nfs-layouttype4-5");