Lines matching references to "page" (f2fs data path, fs/f2fs/data.c)

73 static bool __is_cp_guaranteed(struct page *page)  in __is_cp_guaranteed()  argument
75 struct address_space *mapping = page->mapping; in __is_cp_guaranteed()
82 if (f2fs_is_compressed_page(page)) in __is_cp_guaranteed()
93 is_cold_data(page)) in __is_cp_guaranteed()
98 static enum count_type __read_io_type(struct page *page) in __read_io_type() argument
100 struct address_space *mapping = page_file_mapping(page); in __read_io_type()
132 struct page *page; in __read_end_io() local
137 page = bv->bv_page; in __read_end_io()
140 if (compr && f2fs_is_compressed_page(page)) { in __read_end_io()
141 f2fs_decompress_pages(bio, page, verity); in __read_end_io()
149 if (bio->bi_status || PageError(page)) { in __read_end_io()
150 ClearPageUptodate(page); in __read_end_io()
152 ClearPageError(page); in __read_end_io()
154 SetPageUptodate(page); in __read_end_io()
156 dec_page_count(F2FS_P_SB(page), __read_io_type(page)); in __read_end_io()
157 unlock_page(page); in __read_end_io()
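The __read_end_io() lines above trace the standard read-side completion shape: walk every bio_vec, mark each page uptodate or clear it according to bio->bi_status, then unlock the page. A minimal sketch of that pattern follows; the real function additionally routes compressed clusters through f2fs_decompress_pages() and fs-verity pages to a work queue, both omitted here.

    static void sketch_read_end_io(struct bio *bio)
    {
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
            struct page *page = bv->bv_page;

            if (bio->bi_status || PageError(page)) {
                ClearPageUptodate(page);
                /* clear PG_error so a later read can retry */
                ClearPageError(page);
            } else {
                SetPageUptodate(page);
            }
            unlock_page(page);  /* wakes wait_on_page_locked() callers */
        }
        bio_put(bio);
    }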
187 static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size) in f2fs_verify_pages()
198 struct page *page = bv->bv_page; in f2fs_verify_bio() local
201 dic = (struct decompress_io_ctx *)page_private(page); in f2fs_verify_bio()
212 if (bio->bi_status || PageError(page)) in f2fs_verify_bio()
215 if (fsverity_verify_page(page)) { in f2fs_verify_bio()
216 SetPageUptodate(page); in f2fs_verify_bio()
220 ClearPageUptodate(page); in f2fs_verify_bio()
221 ClearPageError(page); in f2fs_verify_bio()
223 dec_page_count(F2FS_P_SB(page), __read_io_type(page)); in f2fs_verify_bio()
224 unlock_page(page); in f2fs_verify_bio()
348 struct page *page = bvec->bv_page; in f2fs_write_end_io() local
349 enum count_type type = WB_DATA_TYPE(page); in f2fs_write_end_io()
351 if (IS_DUMMY_WRITTEN_PAGE(page)) { in f2fs_write_end_io()
352 set_page_private(page, (unsigned long)NULL); in f2fs_write_end_io()
353 ClearPagePrivate(page); in f2fs_write_end_io()
354 unlock_page(page); in f2fs_write_end_io()
355 mempool_free(page, sbi->write_io_dummy); in f2fs_write_end_io()
362 fscrypt_finalize_bounce_page(&page); in f2fs_write_end_io()
365 if (f2fs_is_compressed_page(page)) { in f2fs_write_end_io()
366 f2fs_compress_write_end_io(bio, page); in f2fs_write_end_io()
372 mapping_set_error(page->mapping, -EIO); in f2fs_write_end_io()
377 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) && in f2fs_write_end_io()
378 page->index != nid_of_node(page)); in f2fs_write_end_io()
381 if (f2fs_in_warm_node_list(sbi, page)) in f2fs_write_end_io()
382 f2fs_del_fsync_node_entry(sbi, page); in f2fs_write_end_io()
383 clear_cold_data(page); in f2fs_write_end_io()
384 end_page_writeback(page); in f2fs_write_end_io()
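f2fs_write_end_io() handles four page kinds in one walk: dummy padding pages go back to the write_io_dummy mempool, fscrypt bounce pages are finalized, compressed pages get their own completion, and ordinary data/node pages finish writeback. A sketch of the common case only, assuming none of the special page types:

    static void sketch_write_end_io(struct bio *bio)
    {
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
            struct page *page = bvec->bv_page;

            if (bio->bi_status) {
                mapping_set_error(page->mapping, -EIO);
                /* the real code may also stop checkpointing here */
            }
            end_page_writeback(page);  /* clears PG_writeback, wakes waiters */
        }
        bio_put(bio);
    }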
516 struct page *page = in __submit_bio() local
519 f2fs_bug_on(sbi, !page); in __submit_bio()
521 zero_user_segment(page, 0, PAGE_SIZE); in __submit_bio()
522 SetPagePrivate(page); in __submit_bio()
523 set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE); in __submit_bio()
524 lock_page(page); in __submit_bio()
525 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) in __submit_bio()
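The __submit_bio() lines above are the LFS-mode padding trick: a partially filled write bio is topped up with zeroed pages from sbi->write_io_dummy so the device sees aligned writes, and each filler is tagged DUMMY_WRITTEN_PAGE so f2fs_write_end_io() (lines 351-355) can recognize and recycle it. A sketch of one padding step; the mempool_alloc() call is assumed from context rather than shown in the listing:

    struct page *page = mempool_alloc(sbi->write_io_dummy,
                                      GFP_NOIO | __GFP_NOFAIL);

    zero_user_segment(page, 0, PAGE_SIZE);      /* never write stale data */
    SetPagePrivate(page);
    set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
    lock_page(page);                            /* end_io will unlock */
    if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
        f2fs_bug_on(sbi, 1);                    /* bio was sized for this */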
597 struct page *page, nid_t ino) in __has_merged_page() argument
605 if (!inode && !page && !ino) in __has_merged_page()
609 struct page *target = bvec->bv_page; in __has_merged_page()
624 if (page && page == target) in __has_merged_page()
654 struct inode *inode, struct page *page, in __submit_merged_write_cond() argument
666 ret = __has_merged_page(io->bio, inode, page, ino); in __submit_merged_write_cond()
684 struct inode *inode, struct page *page, in f2fs_submit_merged_write_cond() argument
687 __submit_merged_write_cond(sbi, inode, page, ino, type, false); in f2fs_submit_merged_write_cond()
704 struct page *page = fio->encrypted_page ? in f2fs_submit_page_bio() local
705 fio->encrypted_page : fio->page; in f2fs_submit_page_bio()
712 trace_f2fs_submit_page_bio(page, fio); in f2fs_submit_page_bio()
718 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, in f2fs_submit_page_bio()
719 fio->page->index, fio, GFP_NOIO); in f2fs_submit_page_bio()
721 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_bio()
727 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); in f2fs_submit_page_bio()
733 __read_io_type(page): WB_DATA_TYPE(fio->page)); in f2fs_submit_page_bio()
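f2fs_submit_page_bio() is the simplest submission path: one page, one bio. When fscrypt has produced a bounce page, that page is what hits the disk, while the crypt context and writeback accounting still key off the original fio->page. A sketch of the flow; __bio_alloc() as a one-vec helper and the -EFAULT return are assumptions about code not shown in the listing:

    struct page *page = fio->encrypted_page ?
                            fio->encrypted_page : fio->page;
    struct bio *bio = __bio_alloc(fio, 1);      /* assumed helper */

    f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                           fio->page->index, fio, GFP_NOIO);

    if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
        bio_put(bio);
        return -EFAULT;                         /* assumed error value */
    }

    if (fio->io_wbc && !is_read_io(fio->op))
        wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

    inc_page_count(fio->sbi, is_read_io(fio->op) ?
                    __read_io_type(page) : WB_DATA_TYPE(fio->page));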
777 struct page *page, enum temp_type temp) in add_bio_entry() argument
786 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) in add_bio_entry()
801 struct page *page) in add_ipu_page() argument
823 fio->page->mapping->host, in add_ipu_page()
824 fio->page->index, fio) && in add_ipu_page()
825 bio_add_page(*bio, page, PAGE_SIZE, 0) == in add_ipu_page()
848 struct bio **bio, struct page *page) in f2fs_submit_merged_ipu_write() argument
868 page, 0); in f2fs_submit_merged_ipu_write()
885 page, 0); in f2fs_submit_merged_ipu_write()
906 struct page *page = fio->encrypted_page ? in f2fs_merge_page_bio() local
907 fio->encrypted_page : fio->page; in f2fs_merge_page_bio()
913 trace_f2fs_submit_page_bio(page, fio); in f2fs_merge_page_bio()
919 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, in f2fs_merge_page_bio()
920 fio->page->index, fio, in f2fs_merge_page_bio()
925 add_bio_entry(fio->sbi, bio, page, fio->temp); in f2fs_merge_page_bio()
927 if (add_ipu_page(fio, &bio, page)) in f2fs_merge_page_bio()
932 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); in f2fs_merge_page_bio()
934 inc_page_count(fio->sbi, WB_DATA_TYPE(page)); in f2fs_merge_page_bio()
947 struct page *bio_page; in f2fs_submit_page_write()
972 bio_page = fio->page; in f2fs_submit_page_write()
982 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, in f2fs_submit_page_write()
983 fio->page->index, fio))) in f2fs_submit_page_write()
995 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, in f2fs_submit_page_write()
996 fio->page->index, fio, in f2fs_submit_page_write()
1007 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); in f2fs_submit_page_write()
1012 trace_f2fs_submit_page_write(fio->page, fio); in f2fs_submit_page_write()
1076 static int f2fs_submit_page_read(struct inode *inode, struct page *page, in f2fs_submit_page_read() argument
1082 bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write); in f2fs_submit_page_read()
1089 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_read()
1093 ClearPageError(page); in f2fs_submit_page_read()
1208 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, in f2fs_get_read_data_page()
1213 struct page *page; in f2fs_get_read_data_page() local
1217 page = f2fs_grab_cache_page(mapping, index, for_write); in f2fs_get_read_data_page()
1218 if (!page) in f2fs_get_read_data_page()
1249 if (PageUptodate(page)) { in f2fs_get_read_data_page()
1250 unlock_page(page); in f2fs_get_read_data_page()
1251 return page; in f2fs_get_read_data_page()
1262 zero_user_segment(page, 0, PAGE_SIZE); in f2fs_get_read_data_page()
1263 if (!PageUptodate(page)) in f2fs_get_read_data_page()
1264 SetPageUptodate(page); in f2fs_get_read_data_page()
1265 unlock_page(page); in f2fs_get_read_data_page()
1266 return page; in f2fs_get_read_data_page()
1269 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write); in f2fs_get_read_data_page()
1272 return page; in f2fs_get_read_data_page()
1275 f2fs_put_page(page, 1); in f2fs_get_read_data_page()
1279 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index) in f2fs_find_data_page()
1282 struct page *page; in f2fs_find_data_page() local
1284 page = find_get_page(mapping, index); in f2fs_find_data_page()
1285 if (page && PageUptodate(page)) in f2fs_find_data_page()
1286 return page; in f2fs_find_data_page()
1287 f2fs_put_page(page, 0); in f2fs_find_data_page()
1289 page = f2fs_get_read_data_page(inode, index, 0, false); in f2fs_find_data_page()
1290 if (IS_ERR(page)) in f2fs_find_data_page()
1291 return page; in f2fs_find_data_page()
1293 if (PageUptodate(page)) in f2fs_find_data_page()
1294 return page; in f2fs_find_data_page()
1296 wait_on_page_locked(page); in f2fs_find_data_page()
1297 if (unlikely(!PageUptodate(page))) { in f2fs_find_data_page()
1298 f2fs_put_page(page, 0); in f2fs_find_data_page()
1301 return page; in f2fs_find_data_page()
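f2fs_find_data_page() is the opportunistic variant: a cache hit returns immediately, otherwise it starts a read and only then blocks on the page lock, which the read completion releases. Reassembling the listed lines (the final error value is an assumption, since the listing cuts it off):

    page = find_get_page(mapping, index);
    if (page && PageUptodate(page))
        return page;                        /* cache hit, no I/O */
    f2fs_put_page(page, 0);

    page = f2fs_get_read_data_page(inode, index, 0, false);
    if (IS_ERR(page))
        return page;
    if (PageUptodate(page))
        return page;

    wait_on_page_locked(page);              /* read end_io unlocks the page */
    if (unlikely(!PageUptodate(page))) {
        f2fs_put_page(page, 0);
        return ERR_PTR(-EIO);               /* assumed error value */
    }
    return page;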
1309 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, in f2fs_get_lock_data_page()
1313 struct page *page; in f2fs_get_lock_data_page() local
1315 page = f2fs_get_read_data_page(inode, index, 0, for_write); in f2fs_get_lock_data_page()
1316 if (IS_ERR(page)) in f2fs_get_lock_data_page()
1317 return page; in f2fs_get_lock_data_page()
1320 lock_page(page); in f2fs_get_lock_data_page()
1321 if (unlikely(page->mapping != mapping)) { in f2fs_get_lock_data_page()
1322 f2fs_put_page(page, 1); in f2fs_get_lock_data_page()
1325 if (unlikely(!PageUptodate(page))) { in f2fs_get_lock_data_page()
1326 f2fs_put_page(page, 1); in f2fs_get_lock_data_page()
1329 return page; in f2fs_get_lock_data_page()
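The locked variant adds the classic lock-and-revalidate dance: after lock_page(), page->mapping must be re-checked because truncation can detach the page while it was unlocked. The goto target and error value below are assumptions; the checks themselves are the listed lines:

    lock_page(page);
    if (unlikely(page->mapping != mapping)) {
        /* truncated or reclaimed while unlocked: retry the lookup */
        f2fs_put_page(page, 1);
        goto repeat;                        /* label assumed */
    }
    if (unlikely(!PageUptodate(page))) {
        f2fs_put_page(page, 1);
        return ERR_PTR(-EIO);               /* assumed error value */
    }
    return page;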
1341 struct page *f2fs_get_new_data_page(struct inode *inode, in f2fs_get_new_data_page()
1342 struct page *ipage, pgoff_t index, bool new_i_size) in f2fs_get_new_data_page()
1345 struct page *page; in f2fs_get_new_data_page() local
1349 page = f2fs_grab_cache_page(mapping, index, true); in f2fs_get_new_data_page()
1350 if (!page) { in f2fs_get_new_data_page()
1362 f2fs_put_page(page, 1); in f2fs_get_new_data_page()
1368 if (PageUptodate(page)) in f2fs_get_new_data_page()
1372 zero_user_segment(page, 0, PAGE_SIZE); in f2fs_get_new_data_page()
1373 if (!PageUptodate(page)) in f2fs_get_new_data_page()
1374 SetPageUptodate(page); in f2fs_get_new_data_page()
1376 f2fs_put_page(page, 1); in f2fs_get_new_data_page()
1380 page = f2fs_get_lock_data_page(inode, index, true); in f2fs_get_new_data_page()
1381 if (IS_ERR(page)) in f2fs_get_new_data_page()
1382 return page; in f2fs_get_new_data_page()
1388 return page; in f2fs_get_new_data_page()
1842 struct page *page; in f2fs_xattr_fiemap() local
1852 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), in f2fs_xattr_fiemap()
1854 if (!page) in f2fs_xattr_fiemap()
1859 f2fs_put_page(page, 1); in f2fs_xattr_fiemap()
1871 f2fs_put_page(page, 1); in f2fs_xattr_fiemap()
1884 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false); in f2fs_xattr_fiemap()
1885 if (!page) in f2fs_xattr_fiemap()
1890 f2fs_put_page(page, 1); in f2fs_xattr_fiemap()
1897 f2fs_put_page(page, 1); in f2fs_xattr_fiemap()
2061 static int f2fs_read_single_page(struct inode *inode, struct page *page, in f2fs_read_single_page() argument
2077 block_in_file = (sector_t)page_index(page); in f2fs_read_single_page()
2108 SetPageMappedToDisk(page); in f2fs_read_single_page()
2110 if (!PageUptodate(page) && (!PageSwapCache(page) && in f2fs_read_single_page()
2111 !cleancache_get_page(page))) { in f2fs_read_single_page()
2112 SetPageUptodate(page); in f2fs_read_single_page()
2123 zero_user_segment(page, 0, PAGE_SIZE); in f2fs_read_single_page()
2124 if (f2fs_need_verity(inode, page->index) && in f2fs_read_single_page()
2125 !fsverity_verify_page(page)) { in f2fs_read_single_page()
2129 if (!PageUptodate(page)) in f2fs_read_single_page()
2130 SetPageUptodate(page); in f2fs_read_single_page()
2131 unlock_page(page); in f2fs_read_single_page()
2141 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { in f2fs_read_single_page()
2148 is_readahead ? REQ_RAHEAD : 0, page->index, in f2fs_read_single_page()
2163 if (bio_add_page(bio, page, blocksize, 0) < blocksize) in f2fs_read_single_page()
2168 ClearPageError(page); in f2fs_read_single_page()
2176 unlock_page(page); in f2fs_read_single_page()
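Inside f2fs_read_single_page(), a block that maps to a hole needs no device I/O at all: the page is zeroed and declared uptodate on the spot, though fs-verity must still verify the zeroed contents. A sketch of that branch (error value and label assumed):

    zero_user_segment(page, 0, PAGE_SIZE);
    if (f2fs_need_verity(inode, page->index) &&
        !fsverity_verify_page(page)) {
        ret = -EIO;                         /* assumed error value */
        goto out;                           /* label assumed */
    }
    if (!PageUptodate(page))
        SetPageUptodate(page);
    unlock_page(page);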
2206 struct page *page = cc->rpages[i]; in f2fs_read_multi_pages() local
2208 if (!page) in f2fs_read_multi_pages()
2210 if ((sector_t)page->index >= last_block_in_file) { in f2fs_read_multi_pages()
2211 zero_user_segment(page, 0, PAGE_SIZE); in f2fs_read_multi_pages()
2212 if (!PageUptodate(page)) in f2fs_read_multi_pages()
2213 SetPageUptodate(page); in f2fs_read_multi_pages()
2214 } else if (!PageUptodate(page)) { in f2fs_read_multi_pages()
2217 unlock_page(page); in f2fs_read_multi_pages()
2264 struct page *page = dic->cpages[i]; in f2fs_read_multi_pages() local
2273 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { in f2fs_read_multi_pages()
2282 page->index, for_write); in f2fs_read_multi_pages()
2301 if (bio_add_page(bio, page, blocksize, 0) < blocksize) in f2fs_read_multi_pages()
2312 ClearPageError(page); in f2fs_read_multi_pages()
2340 struct list_head *pages, struct page *page, in f2fs_mpage_readpages() argument
2377 page = list_last_entry(pages, struct page, lru); in f2fs_mpage_readpages()
2379 prefetchw(&page->flags); in f2fs_mpage_readpages()
2380 list_del(&page->lru); in f2fs_mpage_readpages()
2381 if (add_to_page_cache_lru(page, mapping, in f2fs_mpage_readpages()
2382 page_index(page), in f2fs_mpage_readpages()
2390 if (!f2fs_cluster_can_merge_page(&cc, page->index)) { in f2fs_mpage_readpages()
2399 ret = f2fs_is_compressed_cluster(inode, page->index); in f2fs_mpage_readpages()
2409 f2fs_compress_ctx_add_page(&cc, page); in f2fs_mpage_readpages()
2416 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map, in f2fs_mpage_readpages()
2422 SetPageError(page); in f2fs_mpage_readpages()
2423 zero_user_segment(page, 0, PAGE_SIZE); in f2fs_mpage_readpages()
2424 unlock_page(page); in f2fs_mpage_readpages()
2428 put_page(page); in f2fs_mpage_readpages()
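f2fs_mpage_readpages() still uses the pre-readahead-rework ->readpages() convention: pages arrive strung on a list_head, are detached one at a time, inserted into the page cache, and then handed to f2fs_read_single_page(); on failure the page is zeroed and errored rather than silently dropped. A skeleton of that loop, with the trailing arguments to f2fs_read_single_page() assumed:

    for (; nr_pages; nr_pages--) {
        if (pages) {
            page = list_last_entry(pages, struct page, lru);
            prefetchw(&page->flags);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping, page_index(page),
                                      readahead_gfp_mask(mapping)))
                goto next_page;             /* someone beat us to it */
        }

        ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
                                    &bio, &last_block_in_bio, is_readahead);
        if (ret) {
            SetPageError(page);
            zero_user_segment(page, 0, PAGE_SIZE);
            unlock_page(page);
        }
    next_page:
        if (pages)
            put_page(page);                 /* drop the list reference */
    }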
2449 static int f2fs_read_data_page(struct file *file, struct page *page) in f2fs_read_data_page() argument
2451 struct inode *inode = page_file_mapping(page)->host; in f2fs_read_data_page()
2454 trace_f2fs_readpage(page, DATA); in f2fs_read_data_page()
2457 unlock_page(page); in f2fs_read_data_page()
2463 ret = f2fs_read_inline_data(inode, page); in f2fs_read_data_page()
2465 ret = f2fs_mpage_readpages(page_file_mapping(page), in f2fs_read_data_page()
2466 NULL, page, 1, false); in f2fs_read_data_page()
2475 struct page *page = list_last_entry(pages, struct page, lru); in f2fs_read_data_pages() local
2477 trace_f2fs_readpages(inode, page, nr_pages); in f2fs_read_data_pages()
2491 struct inode *inode = fio->page->mapping->host; in f2fs_encrypt_one_page()
2492 struct page *mpage, *page; in f2fs_encrypt_one_page() local
2498 page = fio->compressed_page ? fio->compressed_page : fio->page; in f2fs_encrypt_one_page()
2507 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, in f2fs_encrypt_one_page()
2598 if (is_cold_data(fio->page)) in f2fs_should_update_outplace()
2600 if (IS_ATOMIC_WRITTEN_PAGE(fio->page)) in f2fs_should_update_outplace()
2611 struct inode *inode = fio->page->mapping->host; in need_inplace_update()
2621 struct page *page = fio->page; in f2fs_do_write_data_page() local
2622 struct inode *inode = page->mapping->host; in f2fs_do_write_data_page()
2631 f2fs_lookup_extent_cache(inode, page->index, &ei)) { in f2fs_do_write_data_page()
2632 fio->old_blkaddr = ei.blk + page->index - ei.fofs; in f2fs_do_write_data_page()
2647 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); in f2fs_do_write_data_page()
2655 ClearPageUptodate(page); in f2fs_do_write_data_page()
2656 clear_cold_data(page); in f2fs_do_write_data_page()
2677 set_page_writeback(page); in f2fs_do_write_data_page()
2678 ClearPageError(page); in f2fs_do_write_data_page()
2686 if (PageWriteback(page)) in f2fs_do_write_data_page()
2687 end_page_writeback(page); in f2fs_do_write_data_page()
2691 trace_f2fs_do_write_data_page(fio->page, IPU); in f2fs_do_write_data_page()
2713 set_page_writeback(page); in f2fs_do_write_data_page()
2714 ClearPageError(page); in f2fs_do_write_data_page()
2721 trace_f2fs_do_write_data_page(page, OPU); in f2fs_do_write_data_page()
2723 if (page->index == 0) in f2fs_do_write_data_page()
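The IPU/OPU split traced above is the core write-policy decision in f2fs_do_write_data_page(): rewrite the existing block in place (IPU) when the old address is valid and policy allows it, otherwise allocate a new block out of place (OPU) in log-structured fashion. A condensed sketch, omitting encryption, dnode lookup, and error handling:

    if (ipu_force || (__is_valid_data_blkaddr(fio->old_blkaddr) &&
                      need_inplace_update(fio))) {
        set_page_writeback(page);
        ClearPageError(page);
        err = f2fs_inplace_write_data(fio);     /* IPU: same block address */
        trace_f2fs_do_write_data_page(fio->page, IPU);
    } else {
        set_page_writeback(page);
        ClearPageError(page);
        f2fs_outplace_write_data(&dn, fio);     /* OPU: new block address */
        trace_f2fs_do_write_data_page(page, OPU);
    }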
2733 int f2fs_write_single_data_page(struct page *page, int *submitted, in f2fs_write_single_data_page() argument
2740 struct inode *inode = page->mapping->host; in f2fs_write_single_data_page()
2745 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT; in f2fs_write_single_data_page()
2756 .page = page, in f2fs_write_single_data_page()
2767 trace_f2fs_writepage(page, DATA); in f2fs_write_single_data_page()
2771 mapping_set_error(page->mapping, -EIO); in f2fs_write_single_data_page()
2785 if (page->index < end_index || in f2fs_write_single_data_page()
2795 if ((page->index >= end_index + 1) || !offset) in f2fs_write_single_data_page()
2798 zero_user_segment(page, offset, PAGE_SIZE); in f2fs_write_single_data_page()
2803 if (f2fs_is_volatile_file(inode) && (!page->index || in f2fs_write_single_data_page()
2824 err = f2fs_write_inline_data(inode, page); in f2fs_write_single_data_page()
2853 ClearPageUptodate(page); in f2fs_write_single_data_page()
2854 clear_cold_data(page); in f2fs_write_single_data_page()
2858 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA); in f2fs_write_single_data_page()
2863 unlock_page(page); in f2fs_write_single_data_page()
2880 redirty_page_for_writepage(wbc, page); in f2fs_write_single_data_page()
2889 unlock_page(page); in f2fs_write_single_data_page()
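The end_index arithmetic visible at lines 2785-2798 is the standard ->writepage() EOF treatment: pages fully inside i_size are written whole, pages wholly beyond it are skipped, and the page straddling EOF has its tail zeroed first. Reconstructed under the usual definitions (offset = bytes valid in the last page):

    const pgoff_t end_index = i_size_read(inode) >> PAGE_SHIFT;
    unsigned offset = i_size_read(inode) & (PAGE_SIZE - 1);

    if (page->index < end_index)
        goto write;                         /* fully inside i_size */

    if ((page->index >= end_index + 1) || !offset)
        goto out;                           /* wholly beyond EOF: skip */

    /* straddling page: zero the part past EOF before writing it */
    zero_user_segment(page, offset, PAGE_SIZE);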
2893 static int f2fs_write_data_page(struct page *page, in f2fs_write_data_page() argument
2897 struct inode *inode = page->mapping->host; in f2fs_write_data_page()
2903 if (f2fs_is_compressed_cluster(inode, page->index)) { in f2fs_write_data_page()
2904 redirty_page_for_writepage(wbc, page); in f2fs_write_data_page()
2911 return f2fs_write_single_data_page(page, NULL, NULL, NULL, in f2fs_write_data_page()
2991 struct page *page = pvec.pages[i]; in f2fs_write_cache_pages() local
3004 page->index)) { in f2fs_write_cache_pages()
3017 struct page *pagep; in f2fs_write_cache_pages()
3022 page->index, &fsdata); in f2fs_write_cache_pages()
3029 fsdata, page->index, in f2fs_write_cache_pages()
3048 done_index = page->index; in f2fs_write_cache_pages()
3050 lock_page(page); in f2fs_write_cache_pages()
3052 if (unlikely(page->mapping != mapping)) { in f2fs_write_cache_pages()
3054 unlock_page(page); in f2fs_write_cache_pages()
3058 if (!PageDirty(page)) { in f2fs_write_cache_pages()
3063 if (PageWriteback(page)) { in f2fs_write_cache_pages()
3065 f2fs_wait_on_page_writeback(page, in f2fs_write_cache_pages()
3071 if (!clear_page_dirty_for_io(page)) in f2fs_write_cache_pages()
3076 get_page(page); in f2fs_write_cache_pages()
3077 f2fs_compress_ctx_add_page(&cc, page); in f2fs_write_cache_pages()
3081 ret = f2fs_write_single_data_page(page, &submitted, in f2fs_write_cache_pages()
3084 unlock_page(page); in f2fs_write_cache_pages()
3109 done_index = page->index + 1; in f2fs_write_cache_pages()
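f2fs_write_cache_pages() follows the write_cache_pages() template: for each page found by the pagevec lookup, lock it, revalidate that it still belongs to the mapping, skip it if it was cleaned in the meantime, wait out (or, under WB_SYNC_NONE, skip) any writeback in flight, and only then clear the dirty bit and write. A sketch of that per-page core; the variable names for the bio/last-block state are assumptions:

    lock_page(page);
    if (unlikely(page->mapping != mapping)) {
        unlock_page(page);                  /* truncated under us */
        continue;
    }
    if (!PageDirty(page)) {
        unlock_page(page);                  /* cleaned by someone else */
        continue;
    }
    if (PageWriteback(page))
        f2fs_wait_on_page_writeback(page, DATA, true, true);

    if (!clear_page_dirty_for_io(page)) {
        unlock_page(page);
        continue;
    }
    ret = f2fs_write_single_data_page(page, &submitted, &bio,
                                      &last_block, wbc, io_type, 0);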
3283 struct page *page, loff_t pos, unsigned len, in prepare_write_begin() argument
3286 struct inode *inode = page->mapping->host; in prepare_write_begin()
3287 pgoff_t index = page->index; in prepare_write_begin()
3289 struct page *ipage; in prepare_write_begin()
3328 f2fs_do_read_inline_data(page, ipage); in prepare_write_begin()
3333 err = f2fs_convert_inline_page(&dn, page); in prepare_write_begin()
3371 struct page **pagep, void **fsdata) in f2fs_write_begin()
3375 struct page *page = NULL; in f2fs_write_begin() local
3445 page = f2fs_pagecache_get_page(mapping, index, in f2fs_write_begin()
3447 if (!page) { in f2fs_write_begin()
3454 *pagep = page; in f2fs_write_begin()
3456 err = prepare_write_begin(sbi, page, pos, len, in f2fs_write_begin()
3463 unlock_page(page); in f2fs_write_begin()
3465 lock_page(page); in f2fs_write_begin()
3466 if (page->mapping != mapping) { in f2fs_write_begin()
3468 f2fs_put_page(page, 1); in f2fs_write_begin()
3473 f2fs_wait_on_page_writeback(page, DATA, false, true); in f2fs_write_begin()
3475 if (len == PAGE_SIZE || PageUptodate(page)) in f2fs_write_begin()
3480 zero_user_segment(page, len, PAGE_SIZE); in f2fs_write_begin()
3485 zero_user_segment(page, 0, PAGE_SIZE); in f2fs_write_begin()
3486 SetPageUptodate(page); in f2fs_write_begin()
3493 err = f2fs_submit_page_read(inode, page, blkaddr, true); in f2fs_write_begin()
3497 lock_page(page); in f2fs_write_begin()
3498 if (unlikely(page->mapping != mapping)) { in f2fs_write_begin()
3499 f2fs_put_page(page, 1); in f2fs_write_begin()
3502 if (unlikely(!PageUptodate(page))) { in f2fs_write_begin()
3510 f2fs_put_page(page, 1); in f2fs_write_begin()
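The tail of f2fs_write_begin() decides whether a read-modify-write is needed before the caller copies user data in: a full-page write or an already-uptodate page needs nothing, a freshly allocated block (NEW_ADDR) is just zeroed, and anything else is read synchronously and revalidated under the page lock. A sketch; labels and the -EIO value are assumptions:

    if (len == PAGE_SIZE || PageUptodate(page))
        return 0;                           /* full overwrite: no pre-read */

    if (blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_SIZE);  /* new block: no old data */
        SetPageUptodate(page);
    } else {
        err = f2fs_submit_page_read(inode, page, blkaddr, true);
        if (err)
            goto fail;                      /* label assumed */

        lock_page(page);                    /* read end_io unlocked it */
        if (unlikely(page->mapping != mapping)) {
            f2fs_put_page(page, 1);
            goto repeat;                    /* label assumed */
        }
        if (unlikely(!PageUptodate(page))) {
            err = -EIO;                     /* assumed error value */
            goto fail;
        }
    }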
3520 struct page *page, void *fsdata) in f2fs_write_end() argument
3522 struct inode *inode = page->mapping->host; in f2fs_write_end()
3532 if (!PageUptodate(page)) { in f2fs_write_end()
3536 SetPageUptodate(page); in f2fs_write_end()
3542 f2fs_compress_write_end(inode, fsdata, page->index, copied); in f2fs_write_end()
3551 set_page_dirty(page); in f2fs_write_end()
3557 f2fs_put_page(page, 1); in f2fs_write_end()
3738 void f2fs_invalidate_page(struct page *page, unsigned int offset, in f2fs_invalidate_page() argument
3741 struct inode *inode = page->mapping->host; in f2fs_invalidate_page()
3748 if (PageDirty(page)) { in f2fs_invalidate_page()
3759 clear_cold_data(page); in f2fs_invalidate_page()
3761 if (IS_ATOMIC_WRITTEN_PAGE(page)) in f2fs_invalidate_page()
3762 return f2fs_drop_inmem_page(inode, page); in f2fs_invalidate_page()
3764 f2fs_clear_page_private(page); in f2fs_invalidate_page()
3767 int f2fs_release_page(struct page *page, gfp_t wait) in f2fs_release_page() argument
3770 if (PageDirty(page)) in f2fs_release_page()
3774 if (IS_ATOMIC_WRITTEN_PAGE(page)) in f2fs_release_page()
3777 clear_cold_data(page); in f2fs_release_page()
3778 f2fs_clear_page_private(page); in f2fs_release_page()
3782 static int f2fs_set_data_page_dirty(struct page *page) in f2fs_set_data_page_dirty() argument
3784 struct inode *inode = page_file_mapping(page)->host; in f2fs_set_data_page_dirty()
3786 trace_f2fs_set_page_dirty(page, DATA); in f2fs_set_data_page_dirty()
3788 if (!PageUptodate(page)) in f2fs_set_data_page_dirty()
3789 SetPageUptodate(page); in f2fs_set_data_page_dirty()
3790 if (PageSwapCache(page)) in f2fs_set_data_page_dirty()
3791 return __set_page_dirty_nobuffers(page); in f2fs_set_data_page_dirty()
3794 if (!IS_ATOMIC_WRITTEN_PAGE(page)) { in f2fs_set_data_page_dirty()
3795 f2fs_register_inmem_page(inode, page); in f2fs_set_data_page_dirty()
3805 if (!PageDirty(page)) { in f2fs_set_data_page_dirty()
3806 __set_page_dirty_nobuffers(page); in f2fs_set_data_page_dirty()
3807 f2fs_update_dirty_page(inode, page); in f2fs_set_data_page_dirty()
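f2fs_set_data_page_dirty() shows the convention for a ->set_page_dirty() hook: force the page uptodate first, divert atomic-write pages to the in-memory commit list, and perform the dirty accounting only on an actual clean-to-dirty transition so counters never double-increment. The shape of the function as recoverable from the lines above, with the atomic-file guard assumed from context:

    if (!PageUptodate(page))
        SetPageUptodate(page);
    if (PageSwapCache(page))
        return __set_page_dirty_nobuffers(page);

    if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
        if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
            f2fs_register_inmem_page(inode, page);
            return 1;
        }
        return 0;       /* already registered: avoid duplicate entries */
    }

    if (!PageDirty(page)) {
        __set_page_dirty_nobuffers(page);
        f2fs_update_dirty_page(inode, page);    /* per-inode dirty count */
        return 1;
    }
    return 0;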
3865 struct page *newpage, struct page *page, enum migrate_mode mode) in f2fs_migrate_page() argument
3869 bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page); in f2fs_migrate_page()
3871 BUG_ON(PageWriteback(page)); in f2fs_migrate_page()
3884 page, extra_count); in f2fs_migrate_page()
3894 if (cur->page == page) { in f2fs_migrate_page()
3895 cur->page = newpage; in f2fs_migrate_page()
3899 put_page(page); in f2fs_migrate_page()
3903 if (PagePrivate(page)) { in f2fs_migrate_page()
3904 f2fs_set_page_private(newpage, page_private(page)); in f2fs_migrate_page()
3905 f2fs_clear_page_private(page); in f2fs_migrate_page()
3909 migrate_page_copy(newpage, page); in f2fs_migrate_page()
3911 migrate_page_states(newpage, page); in f2fs_migrate_page()
4077 void f2fs_clear_page_cache_dirty_tag(struct page *page) in f2fs_clear_page_cache_dirty_tag() argument
4079 struct address_space *mapping = page_mapping(page); in f2fs_clear_page_cache_dirty_tag()
4083 __xa_clear_mark(&mapping->i_pages, page_index(page), in f2fs_clear_page_cache_dirty_tag()
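The __xa_clear_mark() call on line 4083 is the locked (double-underscore) xarray primitive, so f2fs_clear_page_cache_dirty_tag() must wrap it in the i_pages lock. A sketch of the whole function under that assumption:

    void sketch_clear_dirty_tag(struct page *page)
    {
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        xa_lock_irqsave(&mapping->i_pages, flags);
        __xa_clear_mark(&mapping->i_pages, page_index(page),
                        PAGECACHE_TAG_DIRTY);
        xa_unlock_irqrestore(&mapping->i_pages, flags);
    }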