Lines matching refs: i
Cross-reference hits for the local loop counter `i` in f2fs's compression code (the containing functions all live in fs/f2fs/compress.c in the Linux kernel). Matches are grouped by containing function below; the leading number on each line is the line number in that source file.
f2fs_drop_rpages():
    99  int i;
   101  for (i = 0; i < len; i++) {
   102          if (!cc->rpages[i])
   105          unlock_page(cc->rpages[i]);
   107          put_page(cc->rpages[i]);
f2fs_put_rpages_wbc():
   124  unsigned int i;
   126  for (i = 0; i < cc->cluster_size; i++) {
   127          if (!cc->rpages[i])
   130          redirty_page_for_writepage(wbc, cc->rpages[i]);
   131          f2fs_put_page(cc->rpages[i], unlock);
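Both helpers above walk the cluster's fixed-size rpages[] array and skip empty slots before releasing each entry. A minimal userspace sketch of that skip-NULL release pattern; drop_pages() and the malloc()'d entries are illustrative stand-ins, not f2fs definitions:

#include <stdlib.h>

/*
 * Release every populated slot of a fixed-size cluster array, skipping
 * holes, as both loops above do. free() stands in for the page-release
 * calls (put_page()/f2fs_put_page()).
 */
static void drop_pages(void *pages[], int len)
{
        int i;

        for (i = 0; i < len; i++) {
                if (!pages[i])
                        continue;       /* hole in the cluster: nothing to do */
                free(pages[i]);         /* page-release analogue */
                pages[i] = NULL;
        }
}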
f2fs_vmap():
   602  int i;
   605  for (i = 0; i < MAX_VMAP_RETRIES; i++) {
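f2fs_vmap() bounds its retries with MAX_VMAP_RETRIES rather than looping until success; in the kernel this loop appears to wrap vm_map_ram(), cleaning up stale mappings between attempts. A hedged userspace sketch of the same bounded-retry idiom, with malloc() as a stand-in for the mapping call:

#include <stdlib.h>

#define MAX_VMAP_RETRIES 8      /* stand-in value, not the kernel's */

/*
 * Try a fallible operation a bounded number of times and return NULL
 * only if every attempt fails, mirroring the loop at line 605.
 */
static void *map_with_retries(size_t size)
{
        void *buf = NULL;
        int i;

        for (i = 0; i < MAX_VMAP_RETRIES; i++) {
                buf = malloc(size);     /* mapping-call analogue */
                if (buf)
                        break;
                /* the kernel frees stale mappings here before retrying */
        }
        return buf;
}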
f2fs_compress_pages():
   622  int i, ret;
   642  for (i = 0; i < cc->nr_cpages; i++) {
   643          cc->cpages[i] = f2fs_compress_alloc_page();
   644          if (!cc->cpages[i]) {
   680  for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
   681          cc->cbuf->reserved[i] = cpu_to_le32(0);
   700  for (i = 0; i < cc->nr_cpages; i++) {
   701          if (i < new_nr_cpages) {
   702                  new_cpages[i] = cc->cpages[i];
   705          f2fs_compress_free_page(cc->cpages[i]);
   706          cc->cpages[i] = NULL;
   725  for (i = 0; i < cc->nr_cpages; i++) {
   726          if (cc->cpages[i])
   727                  f2fs_compress_free_page(cc->cpages[i]);
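The loop at lines 700-706 shrinks the compressed-page array once the compressor reports how many blocks it actually needs: surviving pages migrate to new_cpages[], the surplus tail is freed and NULLed. A sketch of that trim-and-migrate pattern; trim_pages() and the void * "pages" are illustrative stand-ins, not f2fs API:

#include <stdlib.h>

/*
 * Keep the first new_nr slots, release the surplus tail, and hand back
 * a right-sized array, as the new_cpages[] copy loop above does.
 */
static void **trim_pages(void **pages, int nr, int new_nr)
{
        void **new_pages = calloc(new_nr, sizeof(*new_pages));
        int i;

        if (!new_pages)
                return NULL;            /* caller keeps the original array */

        for (i = 0; i < nr; i++) {
                if (i < new_nr) {
                        new_pages[i] = pages[i];        /* still needed */
                } else {
                        free(pages[i]);                 /* surplus: release */
                        pages[i] = NULL;
                }
        }
        free(pages);
        return new_pages;
}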
cluster_has_invalid_data():
   856  int i;
   858  for (i = 0; i < cc->cluster_size; i++) {
   859          struct page *page = cc->rpages[i];
__f2fs_cluster_blocks():
   888  int i;
   891  for (i = 1; i < cluster_size; i++) {
   895                  dn.node_page, dn.ofs_in_node + i);
set_cluster_writeback():
   943  int i;
   945  for (i = 0; i < cc->cluster_size; i++) {
   946          if (cc->rpages[i])
   947                  set_page_writeback(cc->rpages[i]);
set_cluster_dirty():
   953  int i;
   955  for (i = 0; i < cc->cluster_size; i++)
   956          if (cc->rpages[i])
   957                  set_page_dirty(cc->rpages[i]);
prepare_compress_overwrite():
   969  int i, ret;
   981  for (i = 0; i < cc->cluster_size; i++) {
   982          page = f2fs_pagecache_get_page(mapping, start_idx + i,
  1012  for (i = 0; i < cc->cluster_size; i++) {
  1013          f2fs_bug_on(sbi, cc->rpages[i]);
  1015          page = find_lock_page(mapping, start_idx + i);
  1027          f2fs_unlock_rpages(cc, i + 1);
  1041  f2fs_unlock_rpages(cc, i);
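Note the two unwind counts: f2fs_unlock_rpages(cc, i + 1) runs when slot i was locked before a later check failed, while f2fs_unlock_rpages(cc, i) runs when acquiring slot i itself failed. A sketch of that exact-count unwind; malloc()/free() stand in for lock/unlock and page_still_valid() is a contrived stand-in for the post-lock checks:

#include <stdlib.h>

static int page_still_valid(void *page)
{
        return page != NULL;            /* contrived post-lock check */
}

static void unlock_first(void *pages[], int n)
{
        while (n--)
                free(pages[n]);         /* unlock_page() analogue */
}

/*
 * Acquire a run of page locks with exact unwind counts, mirroring the
 * f2fs_unlock_rpages(cc, i + 1) vs. f2fs_unlock_rpages(cc, i) pair.
 */
static int lock_cluster(void *pages[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                pages[i] = malloc(1);   /* find_lock_page() analogue */
                if (!pages[i]) {
                        unlock_first(pages, i);         /* slot i never held */
                        return -1;
                }
                if (!page_still_valid(pages[i])) {
                        unlock_first(pages, i + 1);     /* slot i is held too */
                        return -1;
                }
        }
        return 0;
}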
f2fs_truncate_partial_cluster():
  1113  int i;
  1115  for (i = cluster_size - 1; i >= 0; i--) {
  1116          loff_t start = rpages[i]->index << PAGE_SHIFT;
  1119                  zero_user_segment(rpages[i], 0, PAGE_SIZE);
  1121                  zero_user_segment(rpages[i], from - start,
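The truncation walks the cluster back to front: pages wholly beyond `from` are zeroed in full, and the first page that straddles `from` gets only its tail zeroed before the loop stops. A self-contained sketch of that arithmetic; truncate_partial() and the byte arrays are stand-ins for the page-cache types:

#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/*
 * Zero every byte at or beyond offset `from`, walking the cluster back
 * to front as f2fs_truncate_partial_cluster() does. pages[i] holds the
 * data of the page whose page-cache index is idx[i].
 */
static void truncate_partial(unsigned char *pages[], const uint64_t idx[],
                             int cluster_size, uint64_t from)
{
        int i;

        for (i = cluster_size - 1; i >= 0; i--) {
                uint64_t start = idx[i] << PAGE_SHIFT;

                if (from <= start) {
                        /* page lies wholly past `from`: zero it all */
                        memset(pages[i], 0, PAGE_SIZE);
                } else {
                        /* straddling page: zero only the tail, then stop */
                        memset(pages[i] + (from - start), 0,
                               PAGE_SIZE - (from - start));
                        break;
                }
        }
}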
f2fs_write_compressed_pages():
  1161  int i, err;
  1186  for (i = 0; i < cc->cluster_size; i++) {
  1188                  dn.ofs_in_node + i) == NULL_ADDR)
  1213  for (i = 0; i < cc->nr_cpages; i++) {
  1214          f2fs_set_compressed_page(cc->cpages[i], inode,
  1215                          cc->rpages[i + 1]->index, cic);
  1216          fio.compressed_page = cc->cpages[i];
  1219                  dn.ofs_in_node + i + 1);
  1225          fio.page = cc->rpages[i + 1];
  1229          cc->cpages[i] = fio.encrypted_page;
  1235  for (i = 0; i < cc->cluster_size; i++)
  1236          cic->rpages[i] = cc->rpages[i];
  1238  for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
  1242          fio.page = cc->rpages[i];
  1246          if (i == 0) {
  1258          if (i > cc->nr_cpages) {
  1269          fio.encrypted_page = cc->cpages[i - 1];
  1271          fio.compressed_page = cc->cpages[i - 1];
  1273          cc->cpages[i - 1] = NULL;
  1310  for (--i; i >= 0; i--)
  1311          fscrypt_finalize_bounce_page(&cc->cpages[i]);
  1322  for (i = 0; i < cc->nr_cpages; i++) {
  1323          if (!cc->cpages[i])
  1325          f2fs_compress_free_page(cc->cpages[i]);
  1326          cc->cpages[i] = NULL;
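Two indexing details stand out here: cc->cpages[i] is paired with cc->rpages[i + 1], since the first block-address slot of a compressed cluster is reserved for the compress-flag marker, and the error path at line 1310 unwinds with for (--i; i >= 0; i--) so that only slots actually set up get torn down. A minimal sketch of that unwind idiom; setup_all() and malloc()/free() are stand-ins for the fscrypt bounce-page calls:

#include <stdlib.h>

/*
 * Set up a run of resources, tearing down exactly the slots already
 * set up on failure, with the same `for (--i; i >= 0; i--)` idiom as
 * the error path above.
 */
static int setup_all(void *res[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                res[i] = malloc(64);    /* bounce-page allocation analogue */
                if (!res[i])
                        goto out_unwind;
        }
        return 0;

out_unwind:
        /* res[i] failed and was never set: start tearing down at i - 1 */
        for (--i; i >= 0; i--) {
                free(res[i]);
                res[i] = NULL;
        }
        return -1;
}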
f2fs_compress_write_end_io():
  1338  int i;
  1350  for (i = 0; i < cic->nr_rpages; i++) {
  1351          WARN_ON(!cic->rpages[i]);
  1352          clear_page_private_gcing(cic->rpages[i]);
  1353          end_page_writeback(cic->rpages[i]);
f2fs_write_raw_pages():
  1366  int _submitted, compr_blocks, ret, i;
  1370  for (i = 0; i < cc->cluster_size; i++) {
  1371          if (!cc->rpages[i])
  1374          redirty_page_for_writepage(wbc, cc->rpages[i]);
  1375          unlock_page(cc->rpages[i]);
  1381  for (i = 0; i < cc->cluster_size; i++) {
  1382          if (!cc->rpages[i])
  1385          lock_page(cc->rpages[i]);
  1387          if (cc->rpages[i]->mapping != mapping) {
  1389                  unlock_page(cc->rpages[i]);
  1393          if (!PageDirty(cc->rpages[i]))
  1396          if (PageWriteback(cc->rpages[i])) {
  1399          f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
  1402          if (!clear_page_dirty_for_io(cc->rpages[i]))
  1405          ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
  1410                  unlock_page(cc->rpages[i]);
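After re-taking each page lock, the code re-validates rpages[i]->mapping against the expected mapping: the page may have been truncated while the lock was not held, and writing it back then would be wrong. A sketch of the lock-then-revalidate pattern using a contrived fake_page type; pthread mutexes stand in for page locks, and none of these names are f2fs's:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_page {                      /* contrived stand-in for struct page */
        pthread_mutex_t lock;
        void *mapping;                  /* set to NULL once "truncated" */
        bool dirty;
};

/*
 * Lock each page, then re-validate: the mapping check catches pages
 * truncated while unlocked, the dirty check skips pages already written.
 */
static void write_dirty_pages(struct fake_page *pages[], int n, void *mapping)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!pages[i])
                        continue;                       /* hole in the cluster */

                pthread_mutex_lock(&pages[i]->lock);
                if (pages[i]->mapping != mapping || !pages[i]->dirty) {
                        pthread_mutex_unlock(&pages[i]->lock);
                        continue;                       /* truncated or clean */
                }
                pages[i]->dirty = false;  /* clear_page_dirty_for_io() analogue */
                /* ... submit the write for pages[i] here ... */
                pthread_mutex_unlock(&pages[i]->lock);
        }
}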
f2fs_prepare_decomp_mem():
  1482  int i;
  1491  for (i = 0; i < dic->cluster_size; i++) {
  1492          if (dic->rpages[i]) {
  1493                  dic->tpages[i] = dic->rpages[i];
  1497          dic->tpages[i] = f2fs_compress_alloc_page();
  1498          if (!dic->tpages[i])
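The tpages[] array is filled by reuse-or-allocate: decompress directly into the caller's page where one exists, otherwise allocate scratch. The matching free path (see f2fs_free_dic() below, line 1613) skips slots that alias rpages[], so only self-allocated pages are released. A sketch under stand-in types:

#include <stdlib.h>

#define PAGE_SZ 4096    /* stand-in page size */

/*
 * Reuse the caller's page when present, allocate a scratch page
 * otherwise. Only scratch slots (where rpages[i] is NULL) need
 * freeing later.
 */
static int prepare_tpages(void *tpages[], void *rpages[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (rpages[i]) {
                        tpages[i] = rpages[i];  /* target page already present */
                        continue;
                }
                tpages[i] = malloc(PAGE_SZ);    /* scratch page analogue */
                if (!tpages[i])
                        return -1;              /* caller unwinds the rest */
        }
        return 0;
}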
f2fs_alloc_dic():
  1546  int i, ret;
  1569  for (i = 0; i < dic->cluster_size; i++)
  1570          dic->rpages[i] = cc->rpages[i];
  1579  for (i = 0; i < dic->nr_cpages; i++) {
  1589                  start_idx + i + 1, dic);
  1590          dic->cpages[i] = page;
f2fs_free_dic():
  1607  int i;
  1612  for (i = 0; i < dic->cluster_size; i++) {
  1613          if (dic->rpages[i])
  1615          if (!dic->tpages[i])
  1617          f2fs_compress_free_page(dic->tpages[i]);
  1623  for (i = 0; i < dic->nr_cpages; i++) {
  1624          if (!dic->cpages[i])
  1626          f2fs_compress_free_page(dic->cpages[i]);
__f2fs_decompress_end_io():
  1663  int i;
  1665  for (i = 0; i < dic->cluster_size; i++) {
  1666          struct page *rpage = dic->rpages[i];
f2fs_verify_cluster():
  1689  int i;
  1692  for (i = 0; i < dic->cluster_size; i++) {
  1693          struct page *rpage = dic->rpages[i];
f2fs_cluster_blocks_are_contiguous():
  1743  int i = compressed ? 1 : 0;
  1745                  dn->ofs_in_node + i);
  1747  for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
  1749                          dn->ofs_in_node + i);
  1753          if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
  1757  return compressed ? i - 1 : i;
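Scanning starts at i = 1 for compressed clusters because slot 0 holds the compress-flag address, and the return value subtracts that slot back out (compressed ? i - 1 : i). A self-contained sketch of the contiguity check, with a plain array standing in for the node-page block-address lookups:

#include <stdbool.h>

/*
 * Length of the consecutive block-address run in one cluster, skipping
 * the leading compress-flag slot when present. Returns the number of
 * data blocks, matching the `compressed ? i - 1 : i` accounting above.
 */
static int contiguous_run(const unsigned long blkaddrs[], int cluster_size,
                          bool compressed)
{
        int base = compressed ? 1 : 0;  /* slot 0 holds the flag address */
        unsigned long first_blkaddr = blkaddrs[base];
        int i;

        for (i = base + 1; i < cluster_size; i++) {
                if (first_blkaddr + i - base != blkaddrs[i])
                        break;          /* consecutive run ends here */
        }
        return compressed ? i - 1 : i;
}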
f2fs_invalidate_compress_pages():
  1858  int i;
  1865  for (i = 0; i < nr_pages; i++) {
  1866          struct page *page = pvec.pages[i];