Lines Matching +full:data +full:- +full:mapping

2  * fs/f2fs/data.c
16 #include <linux/backing-dev.h>
39 struct address_space *mapping = page->mapping; in __is_cp_guaranteed() local
43 if (!mapping) in __is_cp_guaranteed()
46 inode = mapping->host; in __is_cp_guaranteed()
49 if (inode->i_ino == F2FS_META_INO(sbi) || in __is_cp_guaranteed()
50 inode->i_ino == F2FS_NODE_INO(sbi) || in __is_cp_guaranteed()
51 S_ISDIR(inode->i_mode) || in __is_cp_guaranteed()
52 (S_ISREG(inode->i_mode) && in __is_cp_guaranteed()
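
The fragment above tests which pages are guaranteed by checkpoint: pages of the meta or node inode, directory pages, and (per the line truncated at 52) certain regular-file pages. A minimal stand-alone sketch of that predicate, with a stub type in place of the kernel's struct inode and the elided S_ISREG condition reduced to a labeled flag:

#include <stdbool.h>
#include <stdio.h>

struct inode_stub {
	unsigned long ino;
	bool is_dir;
	bool is_atomic_reg;	/* stand-in for the S_ISREG(...) case elided at line 52 */
};

/* mirrors __is_cp_guaranteed(): checkpoint covers meta/node/dir pages */
static bool cp_guaranteed(const struct inode_stub *inode,
			  unsigned long meta_ino, unsigned long node_ino)
{
	if (!inode)
		return false;
	return inode->ino == meta_ino || inode->ino == node_ino ||
	       inode->is_dir || inode->is_atomic_reg;
}

int main(void)
{
	struct inode_stub dir = { .ino = 7, .is_dir = true };

	printf("%d\n", cp_guaranteed(&dir, 1, 2));	/* 1 */
	return 0;
}
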
79 page = bv->bv_page; in __read_end_io()
82 if (bio->bi_status || PageError(page)) { in __read_end_io()
84 /* will be re-read later */ in __read_end_io()
91 if (bio->bi_private) in __read_end_io()
92 mempool_free(bio->bi_private, bio_post_read_ctx_pool); in __read_end_io()
103 fscrypt_decrypt_bio(ctx->bio); in decrypt_work()
110 switch (++ctx->cur_step) { in bio_post_read_processing()
112 if (ctx->enabled_steps & (1 << STEP_DECRYPT)) { in bio_post_read_processing()
113 INIT_WORK(&ctx->work, decrypt_work); in bio_post_read_processing()
114 fscrypt_enqueue_decrypt_work(&ctx->work); in bio_post_read_processing()
117 ctx->cur_step++; in bio_post_read_processing()
118 /* fall-through */ in bio_post_read_processing()
120 __read_end_io(ctx->bio); in bio_post_read_processing()
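
Lines 110-120 show a small step machine: each completion advances cur_step, queues decryption work if the STEP_DECRYPT bit is enabled, and otherwise falls through to finish the bio. A user-space sketch of the same control flow, with printf standing in for the work queue and bio completion (the decryption worker would re-enter the function after finishing):

#include <stdio.h>

enum post_read_step { STEP_INITIAL = 0, STEP_DECRYPT };

struct post_read_ctx {
	int cur_step;
	unsigned int enabled_steps;
};

static void read_end_io(struct post_read_ctx *ctx)
{
	printf("bio complete at step %d\n", ctx->cur_step);
}

static void post_read_processing(struct post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			printf("enqueue decrypt work\n");
			return;	/* worker re-enters with cur_step advanced */
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		read_end_io(ctx);
	}
}

int main(void)
{
	struct post_read_ctx plain = { .cur_step = STEP_INITIAL };

	post_read_processing(&plain);	/* no decryption: completes at once */
	return 0;
}
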
126 return bio->bi_private && !bio->bi_status; in f2fs_bio_post_read_required()
133 bio->bi_status = BLK_STS_IOERR; in f2fs_read_end_io()
137 struct bio_post_read_ctx *ctx = bio->bi_private; in f2fs_read_end_io()
139 ctx->cur_step = STEP_INITIAL; in f2fs_read_end_io()
149 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io()
154 struct page *page = bvec->bv_page; in f2fs_write_end_io()
161 mempool_free(page, sbi->write_io_dummy); in f2fs_write_end_io()
163 if (unlikely(bio->bi_status)) in f2fs_write_end_io()
170 if (unlikely(bio->bi_status)) { in f2fs_write_end_io()
171 mapping_set_error(page->mapping, -EIO); in f2fs_write_end_io()
176 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) && in f2fs_write_end_io()
177 page->index != nid_of_node(page)); in f2fs_write_end_io()
186 wq_has_sleeper(&sbi->cp_wait)) in f2fs_write_end_io()
187 wake_up(&sbi->cp_wait); in f2fs_write_end_io()
198 struct block_device *bdev = sbi->sb->s_bdev; in f2fs_target_device()
202 for (i = 0; i < sbi->s_ndevs; i++) { in f2fs_target_device()
205 blk_addr -= FDEV(i).start_blk; in f2fs_target_device()
213 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); in f2fs_target_device()
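
Lines 198-213 rebase a filesystem-global block address onto the device that actually holds it: find the device whose [start_blk, end_blk] range covers the address, subtract start_blk, and convert blocks to 512-byte sectors. An illustrative sketch; the two-device table and the 4 KiB-block-to-sector factor are assumptions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_FROM_BLOCK(blk)	((blk) << 3)	/* 4 KiB blocks, 512 B sectors */

struct dev_range {
	uint64_t start_blk, end_blk;
	const char *name;
};

int main(void)
{
	/* assumed two-device layout, for illustration only */
	struct dev_range devs[] = {
		{ 0,      262143, "dev0" },
		{ 262144, 524287, "dev1" },
	};
	uint64_t blk_addr = 300000;

	for (size_t i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (blk_addr >= devs[i].start_blk &&
		    blk_addr <= devs[i].end_blk) {
			/* rebase onto the device, as at line 205 */
			uint64_t rel = blk_addr - devs[i].start_blk;

			printf("%s, sector %llu\n", devs[i].name,
			       (unsigned long long)SECTOR_FROM_BLOCK(rel));
			break;
		}
	}
	return 0;
}
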
225 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_target_device_index()
235 return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; in __same_bdev()
239 * Low-level block read/write IO operations.
252 bio->bi_end_io = f2fs_read_end_io; in __bio_alloc()
253 bio->bi_private = NULL; in __bio_alloc()
255 bio->bi_end_io = f2fs_write_end_io; in __bio_alloc()
256 bio->bi_private = sbi; in __bio_alloc()
257 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp); in __bio_alloc()
271 if (type != DATA && type != NODE) in __submit_bio()
274 if (test_opt(sbi, LFS) && current->plug) in __submit_bio()
275 blk_finish_plug(current->plug); in __submit_bio()
277 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; in __submit_bio()
286 mempool_alloc(sbi->write_io_dummy, in __submit_bio()
305 trace_f2fs_submit_read_bio(sbi->sb, type, bio); in __submit_bio()
307 trace_f2fs_submit_write_bio(sbi->sb, type, bio); in __submit_bio()
313 struct f2fs_io_info *fio = &io->fio; in __submit_merged_bio()
315 if (!io->bio) in __submit_merged_bio()
318 bio_set_op_attrs(io->bio, fio->op, fio->op_flags); in __submit_merged_bio()
320 if (is_read_io(fio->op)) in __submit_merged_bio()
321 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
323 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
325 __submit_bio(io->sbi, io->bio, fio->type); in __submit_merged_bio()
326 io->bio = NULL; in __submit_merged_bio()
336 if (!io->bio) in __has_merged_page()
342 bio_for_each_segment_all(bvec, io->bio, i) { in __has_merged_page()
344 if (bvec->bv_page->mapping) in __has_merged_page()
345 target = bvec->bv_page; in __has_merged_page()
347 target = fscrypt_control_page(bvec->bv_page); in __has_merged_page()
349 if (idx != target->index) in __has_merged_page()
352 if (inode && inode == target->mapping->host) in __has_merged_page()
370 io = sbi->write_io[btype] + temp; in has_merged_page()
372 down_read(&io->io_rwsem); in has_merged_page()
374 up_read(&io->io_rwsem); in has_merged_page()
387 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; in __f2fs_submit_merged_write()
389 down_write(&io->io_rwsem); in __f2fs_submit_merged_write()
393 io->fio.type = META_FLUSH; in __f2fs_submit_merged_write()
394 io->fio.op = REQ_OP_WRITE; in __f2fs_submit_merged_write()
395 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC; in __f2fs_submit_merged_write()
397 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; in __f2fs_submit_merged_write()
400 up_write(&io->io_rwsem); in __f2fs_submit_merged_write()
436 f2fs_submit_merged_write(sbi, DATA); in f2fs_flush_merged_writes()
442 * Fill the locked page with data located at the given block address.
448 struct page *page = fio->encrypted_page ? in f2fs_submit_page_bio()
449 fio->encrypted_page : fio->page; in f2fs_submit_page_bio()
451 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_submit_page_bio()
453 return -EFSCORRUPTED; in f2fs_submit_page_bio()
459 bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc, in f2fs_submit_page_bio()
460 1, is_read_io(fio->op), fio->type, fio->temp); in f2fs_submit_page_bio()
464 return -EFAULT; in f2fs_submit_page_bio()
467 if (fio->io_wbc && !is_read_io(fio->op)) in f2fs_submit_page_bio()
468 wbc_account_io(fio->io_wbc, page, PAGE_SIZE); in f2fs_submit_page_bio()
470 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_submit_page_bio()
472 if (!is_read_io(fio->op)) in f2fs_submit_page_bio()
473 inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page)); in f2fs_submit_page_bio()
475 __submit_bio(fio->sbi, bio, fio->type); in f2fs_submit_page_bio()
481 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_submit_page_write()
482 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); in f2fs_submit_page_write()
483 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; in f2fs_submit_page_write()
486 f2fs_bug_on(sbi, is_read_io(fio->op)); in f2fs_submit_page_write()
488 down_write(&io->io_rwsem); in f2fs_submit_page_write()
490 if (fio->in_list) { in f2fs_submit_page_write()
491 spin_lock(&io->io_lock); in f2fs_submit_page_write()
492 if (list_empty(&io->io_list)) { in f2fs_submit_page_write()
493 spin_unlock(&io->io_lock); in f2fs_submit_page_write()
496 fio = list_first_entry(&io->io_list, in f2fs_submit_page_write()
498 list_del(&fio->list); in f2fs_submit_page_write()
499 spin_unlock(&io->io_lock); in f2fs_submit_page_write()
502 if (__is_valid_data_blkaddr(fio->old_blkaddr)) in f2fs_submit_page_write()
503 verify_block_addr(fio, fio->old_blkaddr); in f2fs_submit_page_write()
504 verify_block_addr(fio, fio->new_blkaddr); in f2fs_submit_page_write()
506 bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; in f2fs_submit_page_write()
509 fio->submitted = true; in f2fs_submit_page_write()
513 if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || in f2fs_submit_page_write()
514 (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) || in f2fs_submit_page_write()
515 !__same_bdev(sbi, fio->new_blkaddr, io->bio))) in f2fs_submit_page_write()
518 if (io->bio == NULL) { in f2fs_submit_page_write()
519 if ((fio->type == DATA || fio->type == NODE) && in f2fs_submit_page_write()
520 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { in f2fs_submit_page_write()
522 fio->retry = true; in f2fs_submit_page_write()
525 io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc, in f2fs_submit_page_write()
527 fio->type, fio->temp); in f2fs_submit_page_write()
528 io->fio = *fio; in f2fs_submit_page_write()
531 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_write()
536 if (fio->io_wbc) in f2fs_submit_page_write()
537 wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE); in f2fs_submit_page_write()
539 io->last_block_in_bio = fio->new_blkaddr; in f2fs_submit_page_write()
542 trace_f2fs_submit_page_write(fio->page, fio); in f2fs_submit_page_write()
544 if (fio->in_list) in f2fs_submit_page_write()
549 up_write(&io->io_rwsem); in f2fs_submit_page_write()
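
The test at lines 513-515 decides whether the current page may join the in-flight bio: the new block must directly follow the last one, the operation and flags must match, and the target device must be the same; otherwise the bio is submitted first. A hedged sketch of that merge predicate with simplified stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pending_io {
	uint64_t last_block_in_bio;
	int op;
	int op_flags;
	int dev_idx;	/* stand-in for the __same_bdev() test */
};

/* sketch of the (inverted) submit condition at lines 513-515 */
static bool can_merge(const struct pending_io *io, uint64_t new_blkaddr,
		      int op, int op_flags, int dev_idx)
{
	return io->last_block_in_bio == new_blkaddr - 1 &&
	       io->op == op && io->op_flags == op_flags &&
	       io->dev_idx == dev_idx;
}

int main(void)
{
	struct pending_io io = { .last_block_in_bio = 99, .op = 1 };

	printf("%d\n", can_merge(&io, 100, 1, 0, 0));	/* 1: contiguous */
	printf("%d\n", can_merge(&io, 200, 1, 0, 0));	/* 0: gap, submit first */
	return 0;
}
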
561 return ERR_PTR(-EFAULT); in f2fs_grab_read_bio()
565 return ERR_PTR(-ENOMEM); in f2fs_grab_read_bio()
567 bio->bi_end_io = f2fs_read_end_io; in f2fs_grab_read_bio()
576 return ERR_PTR(-ENOMEM); in f2fs_grab_read_bio()
578 ctx->bio = bio; in f2fs_grab_read_bio()
579 ctx->enabled_steps = post_read_steps; in f2fs_grab_read_bio()
580 bio->bi_private = ctx; in f2fs_grab_read_bio()
600 return -EFAULT; in f2fs_submit_page_read()
603 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_submit_page_read()
609 struct f2fs_node *rn = F2FS_NODE(dn->node_page); in __set_data_blkaddr()
613 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) in __set_data_blkaddr()
614 base = get_extra_isize(dn->inode); in __set_data_blkaddr()
616 /* Get physical address of data block */ in __set_data_blkaddr()
618 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); in __set_data_blkaddr()
622 * Lock ordering for the change of data block address:
623 * ->data_page
624 * ->node_page
629 f2fs_wait_on_page_writeback(dn->node_page, NODE, true); in f2fs_set_data_blkaddr()
631 if (set_page_dirty(dn->node_page)) in f2fs_set_data_blkaddr()
632 dn->node_changed = true; in f2fs_set_data_blkaddr()
637 dn->data_blkaddr = blkaddr; in f2fs_update_data_blkaddr()
642 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
645 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_reserve_new_blocks()
651 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in f2fs_reserve_new_blocks()
652 return -EPERM; in f2fs_reserve_new_blocks()
653 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) in f2fs_reserve_new_blocks()
656 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, in f2fs_reserve_new_blocks()
657 dn->ofs_in_node, count); in f2fs_reserve_new_blocks()
659 f2fs_wait_on_page_writeback(dn->node_page, NODE, true); in f2fs_reserve_new_blocks()
661 for (; count > 0; dn->ofs_in_node++) { in f2fs_reserve_new_blocks()
662 block_t blkaddr = datablock_addr(dn->inode, in f2fs_reserve_new_blocks()
663 dn->node_page, dn->ofs_in_node); in f2fs_reserve_new_blocks()
665 dn->data_blkaddr = NEW_ADDR; in f2fs_reserve_new_blocks()
667 count--; in f2fs_reserve_new_blocks()
671 if (set_page_dirty(dn->node_page)) in f2fs_reserve_new_blocks()
672 dn->node_changed = true; in f2fs_reserve_new_blocks()
676 /* Should keep dn->ofs_in_node unchanged */
679 unsigned int ofs_in_node = dn->ofs_in_node; in f2fs_reserve_new_block()
683 dn->ofs_in_node = ofs_in_node; in f2fs_reserve_new_block()
689 bool need_put = dn->inode_page ? false : true; in f2fs_reserve_block()
696 if (dn->data_blkaddr == NULL_ADDR) in f2fs_reserve_block()
706 struct inode *inode = dn->inode; in f2fs_get_block()
709 dn->data_blkaddr = ei.blk + index - ei.fofs; in f2fs_get_block()
719 struct address_space *mapping = inode->i_mapping; in f2fs_get_read_data_page() local
725 page = f2fs_grab_cache_page(mapping, index, for_write); in f2fs_get_read_data_page()
727 return ERR_PTR(-ENOMEM); in f2fs_get_read_data_page()
730 dn.data_blkaddr = ei.blk + index - ei.fofs; in f2fs_get_read_data_page()
741 err = -ENOENT; in f2fs_get_read_data_page()
752 * new inode page couldn't be allocated due to -ENOSPC. in f2fs_get_read_data_page()
754 * see, f2fs_add_link -> f2fs_get_new_data_page -> in f2fs_get_read_data_page()
777 struct address_space *mapping = inode->i_mapping; in f2fs_find_data_page() local
780 page = find_get_page(mapping, index); in f2fs_find_data_page()
795 return ERR_PTR(-EIO); in f2fs_find_data_page()
808 struct address_space *mapping = inode->i_mapping; in f2fs_get_lock_data_page() local
817 if (unlikely(page->mapping != mapping)) { in f2fs_get_lock_data_page()
823 return ERR_PTR(-EIO); in f2fs_get_lock_data_page()
829 * Caller ensures that this data page is never allocated.
830 * A new zero-filled data page is allocated in the page cache.
840 struct address_space *mapping = inode->i_mapping; in f2fs_get_new_data_page() local
845 page = f2fs_grab_cache_page(mapping, index, true); in f2fs_get_new_data_page()
852 return ERR_PTR(-ENOMEM); in f2fs_get_new_data_page()
889 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in __allocate_data_block()
896 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in __allocate_data_block()
897 return -EPERM; in __allocate_data_block()
899 err = f2fs_get_node_info(sbi, dn->nid, &ni); in __allocate_data_block()
903 dn->data_blkaddr = datablock_addr(dn->inode, in __allocate_data_block()
904 dn->node_page, dn->ofs_in_node); in __allocate_data_block()
905 if (dn->data_blkaddr == NEW_ADDR) in __allocate_data_block()
908 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) in __allocate_data_block()
912 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); in __allocate_data_block()
913 old_blkaddr = dn->data_blkaddr; in __allocate_data_block()
914 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr, in __allocate_data_block()
923 * data from unwritten block via dio_read. in __allocate_data_block()
930 struct inode *inode = file_inode(iocb->ki_filp); in f2fs_preallocate_blocks()
934 bool direct_io = iocb->ki_flags & IOCB_DIRECT; in f2fs_preallocate_blocks()
936 /* convert inline data for Direct I/O */ in f2fs_preallocate_blocks()
946 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); in f2fs_preallocate_blocks()
947 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); in f2fs_preallocate_blocks()
949 map.m_len -= map.m_lblk; in f2fs_preallocate_blocks()
958 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint); in f2fs_preallocate_blocks()
964 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { in f2fs_preallocate_blocks()
976 if (map.m_len > 0 && err == -ENOSPC) { in f2fs_preallocate_blocks()
988 down_read(&sbi->node_change); in __do_map_lock()
990 up_read(&sbi->node_change); in __do_map_lock()
1002 * If original data blocks are allocated, then give them to blockdev.
1011 unsigned int maxblocks = map->m_len; in f2fs_map_blocks()
1026 map->m_len = 0; in f2fs_map_blocks()
1027 map->m_flags = 0; in f2fs_map_blocks()
1030 pgofs = (pgoff_t)map->m_lblk; in f2fs_map_blocks()
1034 map->m_pblk = ei.blk + pgofs - ei.fofs; in f2fs_map_blocks()
1035 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); in f2fs_map_blocks()
1036 map->m_flags = F2FS_MAP_MAPPED; in f2fs_map_blocks()
1037 if (map->m_next_extent) in f2fs_map_blocks()
1038 *map->m_next_extent = pgofs + map->m_len; in f2fs_map_blocks()
1051 map->m_pblk = 0; in f2fs_map_blocks()
1052 if (err == -ENOENT) { in f2fs_map_blocks()
1054 if (map->m_next_pgofs) in f2fs_map_blocks()
1055 *map->m_next_pgofs = in f2fs_map_blocks()
1057 if (map->m_next_extent) in f2fs_map_blocks()
1058 *map->m_next_extent = in f2fs_map_blocks()
1074 err = -EFSCORRUPTED; in f2fs_map_blocks()
1081 err = -EIO; in f2fs_map_blocks()
1093 map->m_seg_type); in f2fs_map_blocks()
1099 map->m_flags |= F2FS_MAP_NEW; in f2fs_map_blocks()
1103 map->m_pblk = 0; in f2fs_map_blocks()
1110 if (map->m_next_pgofs) in f2fs_map_blocks()
1111 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1116 if (map->m_next_pgofs) in f2fs_map_blocks()
1117 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1126 if (map->m_len == 0) { in f2fs_map_blocks()
1129 map->m_flags |= F2FS_MAP_UNWRITTEN; in f2fs_map_blocks()
1130 map->m_flags |= F2FS_MAP_MAPPED; in f2fs_map_blocks()
1132 map->m_pblk = blkaddr; in f2fs_map_blocks()
1133 map->m_len = 1; in f2fs_map_blocks()
1134 } else if ((map->m_pblk != NEW_ADDR && in f2fs_map_blocks()
1135 blkaddr == (map->m_pblk + ofs)) || in f2fs_map_blocks()
1136 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) || in f2fs_map_blocks()
1139 map->m_len++; in f2fs_map_blocks()
1157 map->m_len += dn.ofs_in_node - ofs_in_node; in f2fs_map_blocks()
1159 err = -ENOSPC; in f2fs_map_blocks()
1171 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1172 unsigned int ofs = start_pgofs - map->m_lblk; in f2fs_map_blocks()
1175 start_pgofs, map->m_pblk + ofs, in f2fs_map_blocks()
1176 map->m_len - ofs); in f2fs_map_blocks()
1190 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1191 unsigned int ofs = start_pgofs - map->m_lblk; in f2fs_map_blocks()
1194 start_pgofs, map->m_pblk + ofs, in f2fs_map_blocks()
1195 map->m_len - ofs); in f2fs_map_blocks()
1197 if (map->m_next_extent) in f2fs_map_blocks()
1198 *map->m_next_extent = pgofs + 1; in f2fs_map_blocks()
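
The extent-cache hit at lines 1034-1035 is pure arithmetic: an extent maps file offsets [ei.fofs, ei.fofs + ei.len) to physical blocks starting at ei.blk, so a lookup at pgofs resolves to ei.blk + (pgofs - ei.fofs), and the mapped length is capped by both the request and the extent tail. A worked example in plain C:

#include <stdio.h>

struct extent_info { unsigned long fofs, blk, len; };

int main(void)
{
	struct extent_info ei = { .fofs = 100, .blk = 5000, .len = 32 };
	unsigned long pgofs = 110, maxblocks = 64;

	unsigned long pblk = ei.blk + pgofs - ei.fofs;          /* 5010 */
	unsigned long rem  = ei.fofs + ei.len - pgofs;          /* 22 */
	unsigned long mlen = maxblocks < rem ? maxblocks : rem; /* 22 */

	printf("pblk=%lu len=%lu\n", pblk, mlen);
	return 0;
}
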
1227 map.m_len = last_lblk - map.m_lblk; in f2fs_overwrite_io()
1244 map.m_len = bh->b_size >> inode->i_blkbits; in __get_data_block()
1251 map_bh(bh, inode->i_sb, map.m_pblk); in __get_data_block()
1252 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; in __get_data_block()
1253 bh->b_size = (u64)map.m_len << inode->i_blkbits; in __get_data_block()
1273 inode->i_write_hint)); in get_data_block_dio()
1280 if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks)) in get_data_block_bmap()
1281 return -EFBIG; in get_data_block_bmap()
1290 return (offset >> inode->i_blkbits); in logical_to_blk()
1295 return (blk << inode->i_blkbits); in blk_to_logical()
1306 nid_t xnid = F2FS_I(inode)->i_xattr_nid; in f2fs_xattr_fiemap()
1313 inode->i_ino, false); in f2fs_xattr_fiemap()
1315 return -ENOMEM; in f2fs_xattr_fiemap()
1317 err = f2fs_get_node_info(sbi, inode->i_ino, &ni); in f2fs_xattr_fiemap()
1325 sizeof(__le32) * (DEF_ADDRS_PER_INODE - in f2fs_xattr_fiemap()
1346 return -ENOMEM; in f2fs_xattr_fiemap()
1355 len = inode->i_sb->s_blocksize; in f2fs_xattr_fiemap()
1378 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { in f2fs_fiemap()
1390 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { in f2fs_fiemap()
1397 if (ret != -EAGAIN) in f2fs_fiemap()
1405 last_blk = logical_to_blk(inode, start + len - 1); in f2fs_fiemap()
1421 F2FS_I_SB(inode)->max_file_blocks)) in f2fs_fiemap()
1450 ret = -EINTR; in f2fs_fiemap()
1465 * Note that the aops->readpages() function is ONLY used for read-ahead. If
1466 * this function ever deviates from doing just read-ahead, it should either
1467 * use ->readpage() or do the necessary surgery to decouple ->readpages()
1468 * from read-ahead.
1470 static int f2fs_mpage_readpages(struct address_space *mapping, in f2fs_mpage_readpages() argument
1476 struct inode *inode = mapping->host; in f2fs_mpage_readpages()
1477 const unsigned blkbits = inode->i_blkbits; in f2fs_mpage_readpages()
1493 for (; nr_pages; nr_pages--) { in f2fs_mpage_readpages()
1497 prefetchw(&page->flags); in f2fs_mpage_readpages()
1498 list_del(&page->lru); in f2fs_mpage_readpages()
1499 if (add_to_page_cache_lru(page, mapping, in f2fs_mpage_readpages()
1501 readahead_gfp_mask(mapping))) in f2fs_mpage_readpages()
1507 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> in f2fs_mpage_readpages()
1528 map.m_len = last_block - block_in_file; in f2fs_mpage_readpages()
1536 block_nr = map.m_pblk + block_in_file - map.m_lblk; in f2fs_mpage_readpages()
1560 if (bio && (last_block_in_bio != block_nr - 1 || in f2fs_mpage_readpages()
1563 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_mpage_readpages()
1577 * its completion to see the correct decrypted data. in f2fs_mpage_readpages()
1594 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_mpage_readpages()
1604 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_mpage_readpages()
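
The readpages path computes each page's physical block as map.m_pblk + block_in_file - map.m_lblk (line 1536) and, per the condition at line 1560, submits the current bio whenever the next block is not last_block_in_bio + 1. A small sketch of that batching loop, with printf standing in for bio allocation and submission and assumed block numbers:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* physical blocks of five consecutive file pages (assumed values) */
	unsigned long blocks[] = { 100, 101, 102, 200, 201 };
	unsigned long last_block_in_bio = 0;
	int have_bio = 0;

	for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		/* line 1560: a discontiguity forces a submit */
		if (have_bio && blocks[i] != last_block_in_bio + 1) {
			printf("submit bio ending at %lu\n", last_block_in_bio);
			have_bio = 0;
		}
		if (!have_bio) {
			printf("allocate bio starting at %lu\n", blocks[i]);
			have_bio = 1;
		}
		last_block_in_bio = blocks[i];
	}
	if (have_bio)
		printf("submit bio ending at %lu\n", last_block_in_bio);
	return 0;
}
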
1610 struct inode *inode = page_file_mapping(page)->host; in f2fs_read_data_page()
1611 int ret = -EAGAIN; in f2fs_read_data_page()
1613 trace_f2fs_readpage(page, DATA); in f2fs_read_data_page()
1615 /* If the file has inline data, try to read it directly */ in f2fs_read_data_page()
1618 if (ret == -EAGAIN) in f2fs_read_data_page()
1625 struct address_space *mapping, in f2fs_read_data_pages() argument
1628 struct inode *inode = mapping->host; in f2fs_read_data_pages()
1633 /* If the file has inline data, skip readpages */ in f2fs_read_data_pages()
1637 return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true); in f2fs_read_data_pages()
1642 struct inode *inode = fio->page->mapping->host; in encrypt_one_page()
1650 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); in encrypt_one_page()
1653 fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page, in encrypt_one_page()
1654 PAGE_SIZE, 0, fio->page->index, gfp_flags); in encrypt_one_page()
1655 if (IS_ERR(fio->encrypted_page)) { in encrypt_one_page()
1657 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { in encrypt_one_page()
1658 f2fs_flush_merged_writes(fio->sbi); in encrypt_one_page()
1663 return PTR_ERR(fio->encrypted_page); in encrypt_one_page()
1666 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr); in encrypt_one_page()
1670 page_address(fio->encrypted_page), PAGE_SIZE); in encrypt_one_page()
1680 unsigned int policy = SM_I(sbi)->ipu_policy; in check_inplace_update_policy()
1687 utilization(sbi) > SM_I(sbi)->min_ipu_util) in check_inplace_update_policy()
1690 utilization(sbi) > SM_I(sbi)->min_ipu_util) in check_inplace_update_policy()
1697 fio && fio->op == REQ_OP_WRITE && in check_inplace_update_policy()
1698 !(fio->op_flags & REQ_SYNC) && in check_inplace_update_policy()
1728 if (S_ISDIR(inode->i_mode)) in f2fs_should_update_outplace()
1733 if (is_cold_data(fio->page)) in f2fs_should_update_outplace()
1735 if (IS_ATOMIC_WRITTEN_PAGE(fio->page)) in f2fs_should_update_outplace()
1743 struct inode *inode = fio->page->mapping->host; in need_inplace_update()
1753 struct page *page = fio->page; in f2fs_do_write_data_page()
1754 struct inode *inode = page->mapping->host; in f2fs_do_write_data_page()
1763 f2fs_lookup_extent_cache(inode, page->index, &ei)) { in f2fs_do_write_data_page()
1764 fio->old_blkaddr = ei.blk + page->index - ei.fofs; in f2fs_do_write_data_page()
1766 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
1768 return -EFSCORRUPTED; in f2fs_do_write_data_page()
1771 fio->need_lock = LOCK_DONE; in f2fs_do_write_data_page()
1775 /* Deadlock between page->lock and f2fs_lock_op */ in f2fs_do_write_data_page()
1776 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) in f2fs_do_write_data_page()
1777 return -EAGAIN; in f2fs_do_write_data_page()
1779 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); in f2fs_do_write_data_page()
1783 fio->old_blkaddr = dn.data_blkaddr; in f2fs_do_write_data_page()
1786 if (fio->old_blkaddr == NULL_ADDR) { in f2fs_do_write_data_page()
1792 if (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
1793 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
1795 err = -EFSCORRUPTED; in f2fs_do_write_data_page()
1800 * it is better to do in-place writes for updated data. in f2fs_do_write_data_page()
1802 if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) && in f2fs_do_write_data_page()
1811 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
1812 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
1814 trace_f2fs_do_write_data_page(fio->page, IPU); in f2fs_do_write_data_page()
1819 if (fio->need_lock == LOCK_RETRY) { in f2fs_do_write_data_page()
1820 if (!f2fs_trylock_op(fio->sbi)) { in f2fs_do_write_data_page()
1821 err = -EAGAIN; in f2fs_do_write_data_page()
1824 fio->need_lock = LOCK_REQ; in f2fs_do_write_data_page()
1827 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni); in f2fs_do_write_data_page()
1831 fio->version = ni.version; in f2fs_do_write_data_page()
1844 if (page->index == 0) in f2fs_do_write_data_page()
1849 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
1850 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
1858 struct inode *inode = page->mapping->host; in __write_data_page()
1863 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT; in __write_data_page()
1869 .ino = inode->i_ino, in __write_data_page()
1870 .type = DATA, in __write_data_page()
1882 trace_f2fs_writepage(page, DATA); in __write_data_page()
1884 /* we should bypass data pages so that kworker jobs can proceed */ in __write_data_page()
1886 mapping_set_error(page->mapping, -EIO); in __write_data_page()
1891 if (S_ISDIR(inode->i_mode)) in __write_data_page()
1899 if (page->index < end_index) in __write_data_page()
1903 * If the offset is beyond the file size, in __write_data_page()
1906 offset = i_size & (PAGE_SIZE - 1); in __write_data_page()
1907 if ((page->index >= end_index + 1) || !offset) in __write_data_page()
1915 if (f2fs_is_volatile_file(inode) && (!page->index || in __write_data_page()
1916 (!wbc->for_reclaim && in __write_data_page()
1921 if (S_ISDIR(inode->i_mode)) { in __write_data_page()
1927 if (!wbc->for_reclaim) in __write_data_page()
1934 err = -EAGAIN; in __write_data_page()
1941 if (err == -EAGAIN) { in __write_data_page()
1943 if (err == -EAGAIN) { in __write_data_page()
1952 down_write(&F2FS_I(inode)->i_sem); in __write_data_page()
1953 if (F2FS_I(inode)->last_disk_size < psize) in __write_data_page()
1954 F2FS_I(inode)->last_disk_size = psize; in __write_data_page()
1955 up_write(&F2FS_I(inode)->i_sem); in __write_data_page()
1959 if (err && err != -ENOENT) in __write_data_page()
1969 if (wbc->for_reclaim) { in __write_data_page()
1970 f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA); in __write_data_page()
1977 if (!S_ISDIR(inode->i_mode)) in __write_data_page()
1981 f2fs_submit_merged_write(sbi, DATA); in __write_data_page()
1994 * -> mapping_set_error() -> set_bit(AS_EIO, ...). in __write_data_page()
1998 if (!err || wbc->for_reclaim) in __write_data_page()
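
Lines 1899-1907 classify a page against i_size: pages below end_index are fully inside the file, pages past it are skipped, and the page holding EOF survives only when i_size is not page-aligned (offset != 0), in which case its tail is zeroed. A worked example of the arithmetic; the file size is an arbitrary illustration:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long i_size = 10000;			  /* example file size */
	unsigned long end_index = i_size >> PAGE_SHIFT;	  /* 2 */
	unsigned long offset = i_size & (PAGE_SIZE - 1);  /* 1808 */

	for (unsigned long idx = 0; idx <= 3; idx++) {
		if (idx < end_index)
			printf("page %lu: fully mapped\n", idx);
		else if (idx >= end_index + 1 || !offset)
			printf("page %lu: out of range, skip\n", idx);
		else
			printf("page %lu: partial, zero tail\n", idx);
	}
	return 0;
}
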
2011 * This function was copied from write_cache_pages in mm/page-writeback.c.
2012 * The major change is that cold data pages are written in a separate step
2013 * from warm/hot data pages.
2015 static int f2fs_write_cache_pages(struct address_space *mapping, in f2fs_write_cache_pages() argument
2022 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_cache_pages()
2035 if (get_dirty_pages(mapping->host) <= in f2fs_write_cache_pages()
2036 SM_I(F2FS_M_SB(mapping))->min_hot_blocks) in f2fs_write_cache_pages()
2037 set_inode_flag(mapping->host, FI_HOT_DATA); in f2fs_write_cache_pages()
2039 clear_inode_flag(mapping->host, FI_HOT_DATA); in f2fs_write_cache_pages()
2041 if (wbc->range_cyclic) { in f2fs_write_cache_pages()
2042 writeback_index = mapping->writeback_index; /* prev offset */ in f2fs_write_cache_pages()
2048 end = -1; in f2fs_write_cache_pages()
2050 index = wbc->range_start >> PAGE_SHIFT; in f2fs_write_cache_pages()
2051 end = wbc->range_end >> PAGE_SHIFT; in f2fs_write_cache_pages()
2052 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in f2fs_write_cache_pages()
2056 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
2061 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
2062 tag_pages_for_writeback(mapping, index, end); in f2fs_write_cache_pages()
2067 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in f2fs_write_cache_pages()
2077 if (atomic_read(&sbi->wb_sync_req[DATA]) && in f2fs_write_cache_pages()
2078 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_write_cache_pages()
2083 done_index = page->index; in f2fs_write_cache_pages()
2087 if (unlikely(page->mapping != mapping)) { in f2fs_write_cache_pages()
2099 if (wbc->sync_mode != WB_SYNC_NONE) in f2fs_write_cache_pages()
2101 DATA, true); in f2fs_write_cache_pages()
2120 } else if (ret == -EAGAIN) { in f2fs_write_cache_pages()
2122 if (wbc->sync_mode == WB_SYNC_ALL) { in f2fs_write_cache_pages()
2130 done_index = page->index + 1; in f2fs_write_cache_pages()
2134 last_idx = page->index; in f2fs_write_cache_pages()
2137 if (--wbc->nr_to_write <= 0 && in f2fs_write_cache_pages()
2138 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_write_cache_pages()
2150 end = writeback_index - 1; in f2fs_write_cache_pages()
2153 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in f2fs_write_cache_pages()
2154 mapping->writeback_index = done_index; in f2fs_write_cache_pages()
2157 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, in f2fs_write_cache_pages()
2158 0, last_idx, DATA); in f2fs_write_cache_pages()
2166 if (!S_ISREG(inode->i_mode)) in __should_serialize_io()
2168 if (wbc->sync_mode != WB_SYNC_ALL) in __should_serialize_io()
2170 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) in __should_serialize_io()
2175 static int __f2fs_write_data_pages(struct address_space *mapping, in __f2fs_write_data_pages() argument
2179 struct inode *inode = mapping->host; in __f2fs_write_data_pages()
2186 if (!mapping->a_ops->writepage) in __f2fs_write_data_pages()
2190 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) in __f2fs_write_data_pages()
2197 if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE && in __f2fs_write_data_pages()
2198 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && in __f2fs_write_data_pages()
2206 trace_f2fs_writepages(mapping->host, wbc, DATA); in __f2fs_write_data_pages()
2209 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
2210 atomic_inc(&sbi->wb_sync_req[DATA]); in __f2fs_write_data_pages()
2211 else if (atomic_read(&sbi->wb_sync_req[DATA])) in __f2fs_write_data_pages()
2215 mutex_lock(&sbi->writepages); in __f2fs_write_data_pages()
2220 ret = f2fs_write_cache_pages(mapping, wbc, io_type); in __f2fs_write_data_pages()
2224 mutex_unlock(&sbi->writepages); in __f2fs_write_data_pages()
2226 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
2227 atomic_dec(&sbi->wb_sync_req[DATA]); in __f2fs_write_data_pages()
2229 * if some pages were truncated, we cannot guarantee their mapping->host in __f2fs_write_data_pages()
2237 wbc->pages_skipped += get_dirty_pages(inode); in __f2fs_write_data_pages()
2238 trace_f2fs_writepages(mapping->host, wbc, DATA); in __f2fs_write_data_pages()
2242 static int f2fs_write_data_pages(struct address_space *mapping, in f2fs_write_data_pages() argument
2245 struct inode *inode = mapping->host; in f2fs_write_data_pages()
2247 return __f2fs_write_data_pages(mapping, wbc, in f2fs_write_data_pages()
2248 F2FS_I(inode)->cp_task == current ? in f2fs_write_data_pages()
2252 static void f2fs_write_failed(struct address_space *mapping, loff_t to) in f2fs_write_failed() argument
2254 struct inode *inode = mapping->host; in f2fs_write_failed()
2258 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_write_failed()
2259 down_write(&F2FS_I(inode)->i_mmap_sem); in f2fs_write_failed()
2264 up_write(&F2FS_I(inode)->i_mmap_sem); in f2fs_write_failed()
2265 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_write_failed()
2273 struct inode *inode = page->mapping->host; in prepare_write_begin()
2274 pgoff_t index = page->index; in prepare_write_begin()
2303 ipage = f2fs_get_node_page(sbi, inode->i_ino); in prepare_write_begin()
2315 if (inode->i_nlink) in prepare_write_begin()
2328 dn.data_blkaddr = ei.blk + index - ei.fofs; in prepare_write_begin()
2354 static int f2fs_write_begin(struct file *file, struct address_space *mapping, in f2fs_write_begin() argument
2358 struct inode *inode = mapping->host; in f2fs_write_begin()
2371 err = -ENOMEM; in f2fs_write_begin()
2379 * lock_page(page #0) -> lock_page(inode_page) in f2fs_write_begin()
2391 page = f2fs_pagecache_get_page(mapping, index, in f2fs_write_begin()
2394 err = -ENOMEM; in f2fs_write_begin()
2409 if (page->mapping != mapping) { in f2fs_write_begin()
2416 f2fs_wait_on_page_writeback(page, DATA, false); in f2fs_write_begin()
2421 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) { in f2fs_write_begin()
2435 if (unlikely(page->mapping != mapping)) { in f2fs_write_begin()
2440 err = -EIO; in f2fs_write_begin()
2448 f2fs_write_failed(mapping, pos + len); in f2fs_write_begin()
2455 struct address_space *mapping, in f2fs_write_end() argument
2459 struct inode *inode = page->mapping->host; in f2fs_write_end()
2466 * let generic_perform_write() try to copy data again by returning copied=0. in f2fs_write_end()
2490 unsigned i_blkbits = READ_ONCE(inode->i_blkbits); in check_direct_IO()
2492 unsigned blocksize_mask = (1 << blkbits) - 1; in check_direct_IO()
2494 struct block_device *bdev = inode->i_sb->s_bdev; in check_direct_IO()
2502 blocksize_mask = (1 << blkbits) - 1; in check_direct_IO()
2504 return -EINVAL; in check_direct_IO()
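
check_direct_IO() rejects transfers that are not aligned to the logical block size; the mask at lines 2492/2502 is blocksize - 1, and a position or length with any of those low bits set yields -EINVAL. A sketch of the core test (the kernel also checks the iov buffer alignment, elided here):

#include <stdbool.h>
#include <stdio.h>

static bool dio_aligned(unsigned long long pos, unsigned long long count,
			unsigned int blkbits)
{
	unsigned long long mask = (1ULL << blkbits) - 1;

	/* both position and length must be block-size multiples */
	return ((pos | count) & mask) == 0;
}

int main(void)
{
	printf("%d\n", dio_aligned(4096, 8192, 12));	/* 1: aligned   */
	printf("%d\n", dio_aligned(4096, 1000, 12));	/* 0: bad count */
	return 0;
}
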
2512 struct address_space *mapping = iocb->ki_filp->f_mapping; in f2fs_direct_IO() local
2513 struct inode *inode = mapping->host; in f2fs_direct_IO()
2516 loff_t offset = iocb->ki_pos; in f2fs_direct_IO()
2519 enum rw_hint hint = iocb->ki_hint; in f2fs_direct_IO()
2532 iocb->ki_hint = WRITE_LIFE_NOT_SET; in f2fs_direct_IO()
2534 if (!down_read_trylock(&F2FS_I(inode)->i_gc_rwsem[rw])) { in f2fs_direct_IO()
2535 if (iocb->ki_flags & IOCB_NOWAIT) { in f2fs_direct_IO()
2536 iocb->ki_hint = hint; in f2fs_direct_IO()
2537 err = -EAGAIN; in f2fs_direct_IO()
2540 down_read(&F2FS_I(inode)->i_gc_rwsem[rw]); in f2fs_direct_IO()
2544 up_read(&F2FS_I(inode)->i_gc_rwsem[rw]); in f2fs_direct_IO()
2548 iocb->ki_hint = hint; in f2fs_direct_IO()
2554 f2fs_write_failed(mapping, offset + count); in f2fs_direct_IO()
2567 struct inode *inode = page->mapping->host; in f2fs_invalidate_page()
2570 if (inode->i_ino >= F2FS_ROOT_INO(sbi) && in f2fs_invalidate_page()
2575 if (inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_invalidate_page()
2577 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { in f2fs_invalidate_page()
2613 struct inode *inode = page_file_mapping(page)->host; in f2fs_set_data_page_dirty()
2615 trace_f2fs_set_page_dirty(page, DATA); in f2fs_set_data_page_dirty()
2642 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) in f2fs_bmap() argument
2644 struct inode *inode = mapping->host; in f2fs_bmap()
2650 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in f2fs_bmap()
2651 filemap_write_and_wait(mapping); in f2fs_bmap()
2653 return generic_block_bmap(mapping, block, get_data_block_bmap); in f2fs_bmap()
2659 int f2fs_migrate_page(struct address_space *mapping, in f2fs_migrate_page() argument
2663 struct f2fs_inode_info *fi = F2FS_I(mapping->host); in f2fs_migrate_page()
2671 return -EBUSY; in f2fs_migrate_page()
2672 if (!mutex_trylock(&fi->inmem_lock)) in f2fs_migrate_page()
2673 return -EAGAIN; in f2fs_migrate_page()
2677 * A reference is expected if PagePrivate is set when moving the mapping, in f2fs_migrate_page()
2681 extra_count = (atomic_written ? 1 : 0) - page_has_private(page); in f2fs_migrate_page()
2682 rc = migrate_page_move_mapping(mapping, newpage, in f2fs_migrate_page()
2686 mutex_unlock(&fi->inmem_lock); in f2fs_migrate_page()
2692 list_for_each_entry(cur, &fi->inmem_pages, list) in f2fs_migrate_page()
2693 if (cur->page == page) { in f2fs_migrate_page()
2694 cur->page = newpage; in f2fs_migrate_page()
2697 mutex_unlock(&fi->inmem_lock); in f2fs_migrate_page()
2719 struct address_space *mapping = swap_file->f_mapping; in check_swap_activate() local
2720 struct inode *inode = mapping->host; in check_swap_activate()
2726 sector_t lowest_block = -1; in check_swap_activate()
2729 blkbits = inode->i_blkbits; in check_swap_activate()
2750 * It must be PAGE_SIZE aligned on disk in check_swap_activate()
2752 if (first_block & (blocks_per_page - 1)) { in check_swap_activate()
2771 first_block >>= (PAGE_SHIFT - blkbits); in check_swap_activate()
2788 return -EINVAL; in check_swap_activate()
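
check_swap_activate() insists that each extent start on a page boundary on disk (the mask test at line 2752) and converts block numbers to swap page numbers by shifting off the sub-page bits (line 2771). A sketch of the arithmetic; 1 KiB blocks are assumed purely so the shift is nonzero, since with 4 KiB blocks and 4 KiB pages both factors collapse to 1:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int blkbits = 10;	/* 1 KiB blocks, for illustration only */
	unsigned long blocks_per_page = 1UL << (PAGE_SHIFT - blkbits);	/* 4 */
	unsigned long first_block = 2048;

	/* line 2752: a misaligned extent cannot back swap pages */
	if (first_block & (blocks_per_page - 1)) {
		printf("extent not PAGE_SIZE aligned on disk, reject\n");
		return 1;
	}
	/* line 2771: blocks -> swap page number */
	printf("swap page %lu\n", first_block >> (PAGE_SHIFT - blkbits));
	return 0;
}
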
2797 if (!S_ISREG(inode->i_mode)) in f2fs_swap_activate()
2798 return -EINVAL; in f2fs_swap_activate()
2800 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) in f2fs_swap_activate()
2801 return -EROFS; in f2fs_swap_activate()
2807 ret = check_swap_activate(file, sis->max); in f2fs_swap_activate()
2827 return -EOPNOTSUPP; in f2fs_swap_activate()
2856 struct address_space *mapping = page_mapping(page); in f2fs_clear_radix_tree_dirty_tag() local
2859 xa_lock_irqsave(&mapping->i_pages, flags); in f2fs_clear_radix_tree_dirty_tag()
2860 radix_tree_tag_clear(&mapping->i_pages, page_index(page), in f2fs_clear_radix_tree_dirty_tag()
2862 xa_unlock_irqrestore(&mapping->i_pages, flags); in f2fs_clear_radix_tree_dirty_tag()
2880 return -ENOMEM; in f2fs_init_post_read_processing()