1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * f2fs compress support
4 *
5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6 */
7
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/moduleparam.h>
11 #include <linux/writeback.h>
12 #include <linux/backing-dev.h>
13 #include <linux/lzo.h>
14 #include <linux/lz4.h>
15 #include <linux/zstd.h>
16 #include <linux/pagevec.h>
17
18 #include "f2fs.h"
19 #include "node.h"
20 #include "segment.h"
21 #include <trace/events/f2fs.h>
22
23 static struct kmem_cache *cic_entry_slab;
24 static struct kmem_cache *dic_entry_slab;
25
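/*
 * Page pointer arrays up to the preallocated slab object size come from
 * the per-sb page_array_slab; larger requests fall back to kzalloc.
 * page_array_free() below mirrors the same split when releasing them.
 */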
26 static void *page_array_alloc(struct inode *inode, int nr)
27 {
28 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
29 unsigned int size = sizeof(struct page *) * nr;
30
31 if (likely(size <= sbi->page_array_slab_size))
32 return f2fs_kmem_cache_alloc(sbi->page_array_slab,
33 GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
34 return f2fs_kzalloc(sbi, size, GFP_NOFS);
35 }
36
37 static void page_array_free(struct inode *inode, void *pages, int nr)
38 {
39 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40 unsigned int size = sizeof(struct page *) * nr;
41
42 if (!pages)
43 return;
44
45 if (likely(size <= sbi->page_array_slab_size))
46 kmem_cache_free(sbi->page_array_slab, pages);
47 else
48 kfree(pages);
49 }
50
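/*
 * Per-algorithm backend operations: compress_pages()/decompress_pages()
 * operate on the vmapped cluster buffers (cc->rbuf/cbuf, dic->rbuf/cbuf),
 * the init/destroy hooks manage any algorithm-private workspace, and the
 * optional is_level_valid() validates a user-supplied compression level.
 */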
51 struct f2fs_compress_ops {
52 int (*init_compress_ctx)(struct compress_ctx *cc);
53 void (*destroy_compress_ctx)(struct compress_ctx *cc);
54 int (*compress_pages)(struct compress_ctx *cc);
55 int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
56 void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
57 int (*decompress_pages)(struct decompress_io_ctx *dic);
58 bool (*is_level_valid)(int level);
59 };
60
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
62 {
63 return index & (cc->cluster_size - 1);
64 }
65
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
67 {
68 return index >> cc->log_cluster_size;
69 }
70
71 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
72 {
73 return cc->cluster_idx << cc->log_cluster_size;
74 }
75
76 bool f2fs_is_compressed_page(struct page *page)
77 {
78 if (!PagePrivate(page))
79 return false;
80 if (!page_private(page))
81 return false;
82 if (page_private_nonpointer(page))
83 return false;
84
85 f2fs_bug_on(F2FS_M_SB(page->mapping),
86 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
87 return true;
88 }
89
90 static void f2fs_set_compressed_page(struct page *page,
91 struct inode *inode, pgoff_t index, void *data)
92 {
93 struct folio *folio = page_folio(page);
94
95 folio_attach_private(folio, (void *)data);
96
97 /* i_crypto_info and iv index */
98 folio->index = index;
99 folio->mapping = inode->i_mapping;
100 }
101
102 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
103 {
104 int i;
105
106 for (i = 0; i < len; i++) {
107 if (!cc->rpages[i])
108 continue;
109 if (unlock)
110 unlock_page(cc->rpages[i]);
111 else
112 put_page(cc->rpages[i]);
113 }
114 }
115
116 static void f2fs_put_rpages(struct compress_ctx *cc)
117 {
118 f2fs_drop_rpages(cc, cc->cluster_size, false);
119 }
120
121 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
122 {
123 f2fs_drop_rpages(cc, len, true);
124 }
125
126 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
127 struct writeback_control *wbc, bool redirty, int unlock)
128 {
129 unsigned int i;
130
131 for (i = 0; i < cc->cluster_size; i++) {
132 if (!cc->rpages[i])
133 continue;
134 if (redirty)
135 redirty_page_for_writepage(wbc, cc->rpages[i]);
136 f2fs_put_page(cc->rpages[i], unlock);
137 }
138 }
139
140 struct page *f2fs_compress_control_page(struct page *page)
141 {
142 return ((struct compress_io_ctx *)page_private(page))->rpages[0];
143 }
144
145 int f2fs_init_compress_ctx(struct compress_ctx *cc)
146 {
147 if (cc->rpages)
148 return 0;
149
150 cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
151 return cc->rpages ? 0 : -ENOMEM;
152 }
153
154 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
155 {
156 page_array_free(cc->inode, cc->rpages, cc->cluster_size);
157 cc->rpages = NULL;
158 cc->nr_rpages = 0;
159 cc->nr_cpages = 0;
160 cc->valid_nr_cpages = 0;
161 if (!reuse)
162 cc->cluster_idx = NULL_CLUSTER;
163 }
164
165 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
166 {
167 unsigned int cluster_ofs;
168
169 if (!f2fs_cluster_can_merge_page(cc, folio->index))
170 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
171
172 cluster_ofs = offset_in_cluster(cc, folio->index);
173 cc->rpages[cluster_ofs] = folio_page(folio, 0);
174 cc->nr_rpages++;
175 cc->cluster_idx = cluster_idx(cc, folio->index);
176 }
177
178 #ifdef CONFIG_F2FS_FS_LZO
179 static int lzo_init_compress_ctx(struct compress_ctx *cc)
180 {
181 cc->private = f2fs_vmalloc(LZO1X_MEM_COMPRESS);
182 if (!cc->private)
183 return -ENOMEM;
184
185 cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
186 return 0;
187 }
188
189 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
190 {
191 vfree(cc->private);
192 cc->private = NULL;
193 }
194
195 static int lzo_compress_pages(struct compress_ctx *cc)
196 {
197 int ret;
198
199 ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
200 &cc->clen, cc->private);
201 if (ret != LZO_E_OK) {
202 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
203 "lzo compress failed, ret:%d", ret);
204 return -EIO;
205 }
206 return 0;
207 }
208
209 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
210 {
211 int ret;
212
213 ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
214 dic->rbuf, &dic->rlen);
215 if (ret != LZO_E_OK) {
216 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
217 "lzo decompress failed, ret:%d", ret);
218 return -EIO;
219 }
220
221 if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
222 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
223 "lzo invalid rlen:%zu, expected:%lu",
224 dic->rlen, PAGE_SIZE << dic->log_cluster_size);
225 return -EIO;
226 }
227 return 0;
228 }
229
230 static const struct f2fs_compress_ops f2fs_lzo_ops = {
231 .init_compress_ctx = lzo_init_compress_ctx,
232 .destroy_compress_ctx = lzo_destroy_compress_ctx,
233 .compress_pages = lzo_compress_pages,
234 .decompress_pages = lzo_decompress_pages,
235 };
236 #endif
237
238 #ifdef CONFIG_F2FS_FS_LZ4
239 static int lz4_init_compress_ctx(struct compress_ctx *cc)
240 {
241 unsigned int size = LZ4_MEM_COMPRESS;
242
243 #ifdef CONFIG_F2FS_FS_LZ4HC
244 if (F2FS_I(cc->inode)->i_compress_level)
245 size = LZ4HC_MEM_COMPRESS;
246 #endif
247
248 cc->private = f2fs_vmalloc(size);
249 if (!cc->private)
250 return -ENOMEM;
251
252 /*
253 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover the
254 * worst-case compressed size, because the lz4 compressor handles the
255 * output budget properly on its own.
256 */
257 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
258 return 0;
259 }
260
261 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
262 {
263 vfree(cc->private);
264 cc->private = NULL;
265 }
266
267 static int lz4_compress_pages(struct compress_ctx *cc)
268 {
269 int len = -EINVAL;
270 unsigned char level = F2FS_I(cc->inode)->i_compress_level;
271
272 if (!level)
273 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
274 cc->clen, cc->private);
275 #ifdef CONFIG_F2FS_FS_LZ4HC
276 else
277 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
278 cc->clen, level, cc->private);
279 #endif
280 if (len < 0)
281 return len;
282 if (!len)
283 return -EAGAIN;
284
285 cc->clen = len;
286 return 0;
287 }
288
289 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
290 {
291 int ret;
292
293 ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
294 dic->clen, dic->rlen);
295 if (ret < 0) {
296 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
297 "lz4 decompress failed, ret:%d", ret);
298 return -EIO;
299 }
300
301 if (ret != PAGE_SIZE << dic->log_cluster_size) {
302 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
303 "lz4 invalid ret:%d, expected:%lu",
304 ret, PAGE_SIZE << dic->log_cluster_size);
305 return -EIO;
306 }
307 return 0;
308 }
309
310 static bool lz4_is_level_valid(int lvl)
311 {
312 #ifdef CONFIG_F2FS_FS_LZ4HC
313 return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
314 #else
315 return lvl == 0;
316 #endif
317 }
318
319 static const struct f2fs_compress_ops f2fs_lz4_ops = {
320 .init_compress_ctx = lz4_init_compress_ctx,
321 .destroy_compress_ctx = lz4_destroy_compress_ctx,
322 .compress_pages = lz4_compress_pages,
323 .decompress_pages = lz4_decompress_pages,
324 .is_level_valid = lz4_is_level_valid,
325 };
326 #endif
327
328 #ifdef CONFIG_F2FS_FS_ZSTD
329 static int zstd_init_compress_ctx(struct compress_ctx *cc)
330 {
331 zstd_parameters params;
332 zstd_cstream *stream;
333 void *workspace;
334 unsigned int workspace_size;
335 unsigned char level = F2FS_I(cc->inode)->i_compress_level;
336
337 /* Keep the default level for backward compatibility */
338 if (!level)
339 level = F2FS_ZSTD_DEFAULT_CLEVEL;
340
341 params = zstd_get_params(level, cc->rlen);
342 workspace_size = zstd_cstream_workspace_bound(&params.cParams);
343
344 workspace = f2fs_vmalloc(workspace_size);
345 if (!workspace)
346 return -ENOMEM;
347
348 stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
349 if (!stream) {
350 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
351 "%s zstd_init_cstream failed", __func__);
352 vfree(workspace);
353 return -EIO;
354 }
355
356 cc->private = workspace;
357 cc->private2 = stream;
358
359 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
360 return 0;
361 }
362
363 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
364 {
365 vfree(cc->private);
366 cc->private = NULL;
367 cc->private2 = NULL;
368 }
369
370 static int zstd_compress_pages(struct compress_ctx *cc)
371 {
372 zstd_cstream *stream = cc->private2;
373 zstd_in_buffer inbuf;
374 zstd_out_buffer outbuf;
375 int src_size = cc->rlen;
376 int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
377 int ret;
378
379 inbuf.pos = 0;
380 inbuf.src = cc->rbuf;
381 inbuf.size = src_size;
382
383 outbuf.pos = 0;
384 outbuf.dst = cc->cbuf->cdata;
385 outbuf.size = dst_size;
386
387 ret = zstd_compress_stream(stream, &outbuf, &inbuf);
388 if (zstd_is_error(ret)) {
389 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
390 "%s zstd_compress_stream failed, ret: %d",
391 __func__, zstd_get_error_code(ret));
392 return -EIO;
393 }
394
395 ret = zstd_end_stream(stream, &outbuf);
396 if (zstd_is_error(ret)) {
397 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
398 "%s zstd_end_stream returned %d",
399 __func__, zstd_get_error_code(ret));
400 return -EIO;
401 }
402
403 /*
404 * compressed data remains in the intermediate buffer because there is
405 * no more space in cbuf->cdata
406 */
407 if (ret)
408 return -EAGAIN;
409
410 cc->clen = outbuf.pos;
411 return 0;
412 }
413
414 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
415 {
416 zstd_dstream *stream;
417 void *workspace;
418 unsigned int workspace_size;
419 unsigned int max_window_size =
420 MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
421
422 workspace_size = zstd_dstream_workspace_bound(max_window_size);
423
424 workspace = f2fs_vmalloc(workspace_size);
425 if (!workspace)
426 return -ENOMEM;
427
428 stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
429 if (!stream) {
430 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
431 "%s zstd_init_dstream failed", __func__);
432 vfree(workspace);
433 return -EIO;
434 }
435
436 dic->private = workspace;
437 dic->private2 = stream;
438
439 return 0;
440 }
441
442 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
443 {
444 vfree(dic->private);
445 dic->private = NULL;
446 dic->private2 = NULL;
447 }
448
449 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
450 {
451 zstd_dstream *stream = dic->private2;
452 zstd_in_buffer inbuf;
453 zstd_out_buffer outbuf;
454 int ret;
455
456 inbuf.pos = 0;
457 inbuf.src = dic->cbuf->cdata;
458 inbuf.size = dic->clen;
459
460 outbuf.pos = 0;
461 outbuf.dst = dic->rbuf;
462 outbuf.size = dic->rlen;
463
464 ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
465 if (zstd_is_error(ret)) {
466 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
467 "%s zstd_decompress_stream failed, ret: %d",
468 __func__, zstd_get_error_code(ret));
469 return -EIO;
470 }
471
472 if (dic->rlen != outbuf.pos) {
473 f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
474 "%s ZSTD invalid rlen:%zu, expected:%lu",
475 __func__, dic->rlen,
476 PAGE_SIZE << dic->log_cluster_size);
477 return -EIO;
478 }
479
480 return 0;
481 }
482
483 static bool zstd_is_level_valid(int lvl)
484 {
485 return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
486 }
487
488 static const struct f2fs_compress_ops f2fs_zstd_ops = {
489 .init_compress_ctx = zstd_init_compress_ctx,
490 .destroy_compress_ctx = zstd_destroy_compress_ctx,
491 .compress_pages = zstd_compress_pages,
492 .init_decompress_ctx = zstd_init_decompress_ctx,
493 .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
494 .decompress_pages = zstd_decompress_pages,
495 .is_level_valid = zstd_is_level_valid,
496 };
497 #endif
498
499 #ifdef CONFIG_F2FS_FS_LZO
500 #ifdef CONFIG_F2FS_FS_LZORLE
501 static int lzorle_compress_pages(struct compress_ctx *cc)
502 {
503 int ret;
504
505 ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
506 &cc->clen, cc->private);
507 if (ret != LZO_E_OK) {
508 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
509 "lzo-rle compress failed, ret:%d", ret);
510 return -EIO;
511 }
512 return 0;
513 }
514
515 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
516 .init_compress_ctx = lzo_init_compress_ctx,
517 .destroy_compress_ctx = lzo_destroy_compress_ctx,
518 .compress_pages = lzorle_compress_pages,
519 .decompress_pages = lzo_decompress_pages,
520 };
521 #endif
522 #endif
523
524 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
525 #ifdef CONFIG_F2FS_FS_LZO
526 &f2fs_lzo_ops,
527 #else
528 NULL,
529 #endif
530 #ifdef CONFIG_F2FS_FS_LZ4
531 &f2fs_lz4_ops,
532 #else
533 NULL,
534 #endif
535 #ifdef CONFIG_F2FS_FS_ZSTD
536 &f2fs_zstd_ops,
537 #else
538 NULL,
539 #endif
540 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
541 &f2fs_lzorle_ops,
542 #else
543 NULL,
544 #endif
545 };
546
547 bool f2fs_is_compress_backend_ready(struct inode *inode)
548 {
549 if (!f2fs_compressed_file(inode))
550 return true;
551 return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
552 }
553
554 bool f2fs_is_compress_level_valid(int alg, int lvl)
555 {
556 const struct f2fs_compress_ops *cops = f2fs_cops[alg];
557
558 if (cops->is_level_valid)
559 return cops->is_level_valid(lvl);
560
561 return lvl == 0;
562 }
563
564 static mempool_t *compress_page_pool;
565 static int num_compress_pages = 512;
566 module_param(num_compress_pages, uint, 0444);
567 MODULE_PARM_DESC(num_compress_pages,
568 "Number of intermediate compress pages to preallocate");
569
570 int __init f2fs_init_compress_mempool(void)
571 {
572 compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
573 return compress_page_pool ? 0 : -ENOMEM;
574 }
575
576 void f2fs_destroy_compress_mempool(void)
577 {
578 mempool_destroy(compress_page_pool);
579 }
580
581 static struct page *f2fs_compress_alloc_page(void)
582 {
583 struct page *page;
584
585 page = mempool_alloc(compress_page_pool, GFP_NOFS);
586 lock_page(page);
587
588 return page;
589 }
590
591 static void f2fs_compress_free_page(struct page *page)
592 {
593 if (!page)
594 return;
595 detach_page_private(page);
596 page->mapping = NULL;
597 unlock_page(page);
598 mempool_free(page, compress_page_pool);
599 }
600
601 #define MAX_VMAP_RETRIES 3
602
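/*
 * vm_map_ram() may fail transiently when the vmap space is fragmented;
 * retry a few times, flushing lazily freed mappings with
 * vm_unmap_aliases() between attempts.
 */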
603 static void *f2fs_vmap(struct page **pages, unsigned int count)
604 {
605 int i;
606 void *buf = NULL;
607
608 for (i = 0; i < MAX_VMAP_RETRIES; i++) {
609 buf = vm_map_ram(pages, count, -1);
610 if (buf)
611 break;
612 vm_unmap_aliases();
613 }
614 return buf;
615 }
616
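/*
 * Compress one cluster: vmap the raw pages (rpages) and the newly
 * allocated compressed pages (cpages), run the backend, and accept the
 * result only if payload plus header fit in at most cluster_size - 1
 * pages; otherwise return -EAGAIN so the caller writes the cluster
 * uncompressed.
 */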
617 static int f2fs_compress_pages(struct compress_ctx *cc)
618 {
619 struct f2fs_inode_info *fi = F2FS_I(cc->inode);
620 const struct f2fs_compress_ops *cops =
621 f2fs_cops[fi->i_compress_algorithm];
622 unsigned int max_len, new_nr_cpages;
623 u32 chksum = 0;
624 int i, ret;
625
626 trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
627 cc->cluster_size, fi->i_compress_algorithm);
628
629 if (cops->init_compress_ctx) {
630 ret = cops->init_compress_ctx(cc);
631 if (ret)
632 goto out;
633 }
634
635 max_len = COMPRESS_HEADER_SIZE + cc->clen;
636 cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
637 cc->valid_nr_cpages = cc->nr_cpages;
638
639 cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
640 if (!cc->cpages) {
641 ret = -ENOMEM;
642 goto destroy_compress_ctx;
643 }
644
645 for (i = 0; i < cc->nr_cpages; i++)
646 cc->cpages[i] = f2fs_compress_alloc_page();
647
648 cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
649 if (!cc->rbuf) {
650 ret = -ENOMEM;
651 goto out_free_cpages;
652 }
653
654 cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
655 if (!cc->cbuf) {
656 ret = -ENOMEM;
657 goto out_vunmap_rbuf;
658 }
659
660 ret = cops->compress_pages(cc);
661 if (ret)
662 goto out_vunmap_cbuf;
663
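/* accept the result only if it saves at least one full page, header included */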
664 max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
665
666 if (cc->clen > max_len) {
667 ret = -EAGAIN;
668 goto out_vunmap_cbuf;
669 }
670
671 cc->cbuf->clen = cpu_to_le32(cc->clen);
672
673 if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
674 chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
675 cc->cbuf->cdata, cc->clen);
676 cc->cbuf->chksum = cpu_to_le32(chksum);
677
678 for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
679 cc->cbuf->reserved[i] = cpu_to_le32(0);
680
681 new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
682
683 /* zero out any unused part of the last page */
684 memset(&cc->cbuf->cdata[cc->clen], 0,
685 (new_nr_cpages * PAGE_SIZE) -
686 (cc->clen + COMPRESS_HEADER_SIZE));
687
688 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
689 vm_unmap_ram(cc->rbuf, cc->cluster_size);
690
691 for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
692 f2fs_compress_free_page(cc->cpages[i]);
693 cc->cpages[i] = NULL;
694 }
695
696 if (cops->destroy_compress_ctx)
697 cops->destroy_compress_ctx(cc);
698
699 cc->valid_nr_cpages = new_nr_cpages;
700
701 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
702 cc->clen, ret);
703 return 0;
704
705 out_vunmap_cbuf:
706 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
707 out_vunmap_rbuf:
708 vm_unmap_ram(cc->rbuf, cc->cluster_size);
709 out_free_cpages:
710 for (i = 0; i < cc->nr_cpages; i++) {
711 if (cc->cpages[i])
712 f2fs_compress_free_page(cc->cpages[i]);
713 }
714 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
715 cc->cpages = NULL;
716 destroy_compress_ctx:
717 if (cops->destroy_compress_ctx)
718 cops->destroy_compress_ctx(cc);
719 out:
720 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
721 cc->clen, ret);
722 return ret;
723 }
724
725 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
726 bool pre_alloc);
727 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
728 bool bypass_destroy_callback, bool pre_alloc);
729
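/*
 * Decompress a cluster once all of its compressed pages have completed
 * I/O (see f2fs_end_read_compressed_page()). Verifies the optional
 * checksum and reports the result through f2fs_decompress_end_io().
 */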
730 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
731 {
732 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
733 struct f2fs_inode_info *fi = F2FS_I(dic->inode);
734 const struct f2fs_compress_ops *cops =
735 f2fs_cops[fi->i_compress_algorithm];
736 bool bypass_callback = false;
737 int ret;
738
739 trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
740 dic->cluster_size, fi->i_compress_algorithm);
741
742 if (dic->failed) {
743 ret = -EIO;
744 goto out_end_io;
745 }
746
747 ret = f2fs_prepare_decomp_mem(dic, false);
748 if (ret) {
749 bypass_callback = true;
750 goto out_release;
751 }
752
753 dic->clen = le32_to_cpu(dic->cbuf->clen);
754 dic->rlen = PAGE_SIZE << dic->log_cluster_size;
755
756 if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
757 ret = -EFSCORRUPTED;
758
759 /* Avoid f2fs_commit_super in irq context */
760 if (!in_task)
761 f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
762 else
763 f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
764 goto out_release;
765 }
766
767 ret = cops->decompress_pages(dic);
768
769 if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
770 u32 provided = le32_to_cpu(dic->cbuf->chksum);
771 u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
772
773 if (provided != calculated) {
774 if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
775 set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
776 f2fs_info_ratelimited(sbi,
777 "checksum invalid, nid = %lu, %x vs %x",
778 dic->inode->i_ino,
779 provided, calculated);
780 }
781 set_sbi_flag(sbi, SBI_NEED_FSCK);
782 }
783 }
784
785 out_release:
786 f2fs_release_decomp_mem(dic, bypass_callback, false);
787
788 out_end_io:
789 trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
790 dic->clen, ret);
791 f2fs_decompress_end_io(dic, ret, in_task);
792 }
793
794 /*
795 * This is called when a page of a compressed cluster has been read from disk
796 * (or failed to be read from disk). It checks whether this page was the last
797 * page being waited on in the cluster, and if so, it decompresses the cluster
798 * (or in the case of a failure, cleans up without actually decompressing).
799 */
800 void f2fs_end_read_compressed_page(struct page *page, bool failed,
801 block_t blkaddr, bool in_task)
802 {
803 struct decompress_io_ctx *dic =
804 (struct decompress_io_ctx *)page_private(page);
805 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
806
807 dec_page_count(sbi, F2FS_RD_DATA);
808
809 if (failed)
810 WRITE_ONCE(dic->failed, true);
811 else if (blkaddr && in_task)
812 f2fs_cache_compressed_page(sbi, page,
813 dic->inode->i_ino, blkaddr);
814
815 if (atomic_dec_and_test(&dic->remaining_pages))
816 f2fs_decompress_cluster(dic, in_task);
817 }
818
819 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
820 {
821 if (cc->cluster_idx == NULL_CLUSTER)
822 return true;
823 return cc->cluster_idx == cluster_idx(cc, index);
824 }
825
826 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
827 {
828 return cc->nr_rpages == 0;
829 }
830
831 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
832 {
833 return cc->cluster_size == cc->nr_rpages;
834 }
835
836 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
837 {
838 if (f2fs_cluster_is_empty(cc))
839 return true;
840 return is_page_in_cluster(cc, index);
841 }
842
843 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
844 int index, int nr_pages, bool uptodate)
845 {
846 unsigned long pgidx = page_folio(pages[index])->index;
847 int i = uptodate ? 0 : 1;
848
849 /*
850 * when @uptodate is true, check whether all pages in the cluster are
851 * uptodate.
852 */
853 if (uptodate && (pgidx % cc->cluster_size))
854 return false;
855
856 if (nr_pages - index < cc->cluster_size)
857 return false;
858
859 for (; i < cc->cluster_size; i++) {
860 struct folio *folio = page_folio(pages[index + i]);
861
862 if (folio->index != pgidx + i)
863 return false;
864 if (uptodate && !folio_test_uptodate(folio))
865 return false;
866 }
867
868 return true;
869 }
870
871 static bool cluster_has_invalid_data(struct compress_ctx *cc)
872 {
873 loff_t i_size = i_size_read(cc->inode);
874 unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
875 int i;
876
877 for (i = 0; i < cc->cluster_size; i++) {
878 struct page *page = cc->rpages[i];
879
880 f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
881
882 /* beyond EOF */
883 if (page_folio(page)->index >= nr_pages)
884 return true;
885 }
886 return false;
887 }
888
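/*
 * Debug-only (CONFIG_F2FS_CHECK_FS) layout check for a compressed
 * cluster: the COMPRESS_ADDR header must sit at a cluster-aligned
 * offset, must not repeat within the cluster, and no valid block may
 * follow a hole. Returns true and flags SBI_NEED_FSCK on violation.
 */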
889 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
890 {
891 #ifdef CONFIG_F2FS_CHECK_FS
892 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
893 unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
894 int cluster_end = 0;
895 unsigned int count;
896 int i;
897 char *reason = "";
898
899 if (dn->data_blkaddr != COMPRESS_ADDR)
900 return false;
901
902 /* [..., COMPR_ADDR, ...] */
903 if (dn->ofs_in_node % cluster_size) {
904 reason = "[*|C|*|*]";
905 goto out;
906 }
907
908 for (i = 1, count = 1; i < cluster_size; i++, count++) {
909 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
910 dn->ofs_in_node + i);
911
912 /* [COMPR_ADDR, ..., COMPR_ADDR] */
913 if (blkaddr == COMPRESS_ADDR) {
914 reason = "[C|*|C|*]";
915 goto out;
916 }
917 if (!__is_valid_data_blkaddr(blkaddr)) {
918 if (!cluster_end)
919 cluster_end = i;
920 continue;
921 }
922 /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
923 if (cluster_end) {
924 reason = "[C|N|N|V]";
925 goto out;
926 }
927 }
928
929 f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
930 !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
931
932 return false;
933 out:
934 f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
935 dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
936 set_sbi_flag(sbi, SBI_NEED_FSCK);
937 return true;
938 #else
939 return false;
940 #endif
941 }
942
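/*
 * Count the block addresses in one cluster that point at valid on-disk
 * data blocks; COMPRESS_ADDR, NULL_ADDR and NEW_ADDR slots are skipped.
 */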
943 static int __f2fs_get_cluster_blocks(struct inode *inode,
944 struct dnode_of_data *dn)
945 {
946 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
947 int count, i;
948
949 for (i = 0, count = 0; i < cluster_size; i++) {
950 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
951 dn->ofs_in_node + i);
952
953 if (__is_valid_data_blkaddr(blkaddr))
954 count++;
955 }
956
957 return count;
958 }
959
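/*
 * Common cluster lookup. Depending on @type, returns the number of
 * compressed blocks plus the header slot (CLUSTER_COMPR_BLKS), 1 if the
 * cluster is compressed (CLUSTER_IS_COMPR), or the number of raw blocks
 * in a non-compressed cluster (CLUSTER_RAW_BLKS). A missing dnode
 * (-ENOENT) is treated as zero blocks.
 */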
960 static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
961 enum cluster_check_type type)
962 {
963 struct dnode_of_data dn;
964 unsigned int start_idx = cluster_idx <<
965 F2FS_I(inode)->i_log_cluster_size;
966 int ret;
967
968 set_new_dnode(&dn, inode, NULL, NULL, 0);
969 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
970 if (ret) {
971 if (ret == -ENOENT)
972 ret = 0;
973 goto fail;
974 }
975
976 if (f2fs_sanity_check_cluster(&dn)) {
977 ret = -EFSCORRUPTED;
978 goto fail;
979 }
980
981 if (dn.data_blkaddr == COMPRESS_ADDR) {
982 if (type == CLUSTER_COMPR_BLKS)
983 ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
984 else if (type == CLUSTER_IS_COMPR)
985 ret = 1;
986 } else if (type == CLUSTER_RAW_BLKS) {
987 ret = __f2fs_get_cluster_blocks(inode, &dn);
988 }
989 fail:
990 f2fs_put_dnode(&dn);
991 return ret;
992 }
993
994 /* return # of compressed blocks in compressed cluster */
995 static int f2fs_compressed_blocks(struct compress_ctx *cc)
996 {
997 return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
998 CLUSTER_COMPR_BLKS);
999 }
1000
1001 /* return # of raw blocks in non-compressed cluster */
1002 static int f2fs_decompressed_blocks(struct inode *inode,
1003 unsigned int cluster_idx)
1004 {
1005 return __f2fs_cluster_blocks(inode, cluster_idx,
1006 CLUSTER_RAW_BLKS);
1007 }
1008
1009 /* return whether the cluster is a compressed one or not */
1010 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
1011 {
1012 return __f2fs_cluster_blocks(inode,
1013 index >> F2FS_I(inode)->i_log_cluster_size,
1014 CLUSTER_IS_COMPR);
1015 }
1016
1017 /* return whether the cluster is not fully mapped with raw data blocks */
1018 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
1019 {
1020 unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
1021
1022 return f2fs_decompressed_blocks(inode, cluster_idx) !=
1023 F2FS_I(inode)->i_cluster_size;
1024 }
1025
1026 static bool cluster_may_compress(struct compress_ctx *cc)
1027 {
1028 if (!f2fs_need_compress_data(cc->inode))
1029 return false;
1030 if (f2fs_is_atomic_file(cc->inode))
1031 return false;
1032 if (!f2fs_cluster_is_full(cc))
1033 return false;
1034 if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1035 return false;
1036 return !cluster_has_invalid_data(cc);
1037 }
1038
1039 static void set_cluster_writeback(struct compress_ctx *cc)
1040 {
1041 int i;
1042
1043 for (i = 0; i < cc->cluster_size; i++) {
1044 if (cc->rpages[i])
1045 set_page_writeback(cc->rpages[i]);
1046 }
1047 }
1048
1049 static void cancel_cluster_writeback(struct compress_ctx *cc,
1050 struct compress_io_ctx *cic, int submitted)
1051 {
1052 int i;
1053
1054 /* Wait for submitted IOs. */
1055 if (submitted > 1) {
1056 f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
1057 while (atomic_read(&cic->pending_pages) !=
1058 (cc->valid_nr_cpages - submitted + 1))
1059 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1060 }
1061
1062 /* Cancel writeback and stay locked. */
1063 for (i = 0; i < cc->cluster_size; i++) {
1064 if (i < submitted) {
1065 inode_inc_dirty_pages(cc->inode);
1066 lock_page(cc->rpages[i]);
1067 }
1068 clear_page_private_gcing(cc->rpages[i]);
1069 if (folio_test_writeback(page_folio(cc->rpages[i])))
1070 end_page_writeback(cc->rpages[i]);
1071 }
1072 }
1073
1074 static void set_cluster_dirty(struct compress_ctx *cc)
1075 {
1076 int i;
1077
1078 for (i = 0; i < cc->cluster_size; i++)
1079 if (cc->rpages[i]) {
1080 set_page_dirty(cc->rpages[i]);
1081 set_page_private_gcing(cc->rpages[i]);
1082 }
1083 }
1084
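/*
 * Prepare an overwrite of a compressed cluster: lock and, if needed,
 * read in every page of the cluster so write_begin/write_end can work
 * on fully uptodate rpages. Returns cluster_size on success, 0 if the
 * cluster is not compressed, or a negative errno.
 */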
1085 static int prepare_compress_overwrite(struct compress_ctx *cc,
1086 struct page **pagep, pgoff_t index, void **fsdata)
1087 {
1088 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1089 struct address_space *mapping = cc->inode->i_mapping;
1090 struct page *page;
1091 sector_t last_block_in_bio;
1092 fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1093 pgoff_t start_idx = start_idx_of_cluster(cc);
1094 int i, ret;
1095
1096 retry:
1097 ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1098 if (ret <= 0)
1099 return ret;
1100
1101 ret = f2fs_init_compress_ctx(cc);
1102 if (ret)
1103 return ret;
1104
1105 /* keep page reference to avoid page reclaim */
1106 for (i = 0; i < cc->cluster_size; i++) {
1107 page = f2fs_pagecache_get_page(mapping, start_idx + i,
1108 fgp_flag, GFP_NOFS);
1109 if (!page) {
1110 ret = -ENOMEM;
1111 goto unlock_pages;
1112 }
1113
1114 if (PageUptodate(page))
1115 f2fs_put_page(page, 1);
1116 else
1117 f2fs_compress_ctx_add_page(cc, page_folio(page));
1118 }
1119
1120 if (!f2fs_cluster_is_empty(cc)) {
1121 struct bio *bio = NULL;
1122
1123 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1124 &last_block_in_bio, NULL, true);
1125 f2fs_put_rpages(cc);
1126 f2fs_destroy_compress_ctx(cc, true);
1127 if (ret)
1128 goto out;
1129 if (bio)
1130 f2fs_submit_read_bio(sbi, bio, DATA);
1131
1132 ret = f2fs_init_compress_ctx(cc);
1133 if (ret)
1134 goto out;
1135 }
1136
1137 for (i = 0; i < cc->cluster_size; i++) {
1138 f2fs_bug_on(sbi, cc->rpages[i]);
1139
1140 page = find_lock_page(mapping, start_idx + i);
1141 if (!page) {
1142 /* page can be truncated */
1143 goto release_and_retry;
1144 }
1145
1146 f2fs_wait_on_page_writeback(page, DATA, true, true);
1147 f2fs_compress_ctx_add_page(cc, page_folio(page));
1148
1149 if (!PageUptodate(page)) {
1150 f2fs_handle_page_eio(sbi, page_folio(page), DATA);
1151 release_and_retry:
1152 f2fs_put_rpages(cc);
1153 f2fs_unlock_rpages(cc, i + 1);
1154 f2fs_destroy_compress_ctx(cc, true);
1155 goto retry;
1156 }
1157 }
1158
1159 if (likely(!ret)) {
1160 *fsdata = cc->rpages;
1161 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1162 return cc->cluster_size;
1163 }
1164
1165 unlock_pages:
1166 f2fs_put_rpages(cc);
1167 f2fs_unlock_rpages(cc, i);
1168 f2fs_destroy_compress_ctx(cc, true);
1169 out:
1170 return ret;
1171 }
1172
1173 int f2fs_prepare_compress_overwrite(struct inode *inode,
1174 struct page **pagep, pgoff_t index, void **fsdata)
1175 {
1176 struct compress_ctx cc = {
1177 .inode = inode,
1178 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1179 .cluster_size = F2FS_I(inode)->i_cluster_size,
1180 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1181 .rpages = NULL,
1182 .nr_rpages = 0,
1183 };
1184
1185 return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1186 }
1187
1188 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1189 pgoff_t index, unsigned copied)
1190
1191 {
1192 struct compress_ctx cc = {
1193 .inode = inode,
1194 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1195 .cluster_size = F2FS_I(inode)->i_cluster_size,
1196 .rpages = fsdata,
1197 };
1198 struct folio *folio = page_folio(cc.rpages[0]);
1199 bool first_index = (index == folio->index);
1200
1201 if (copied)
1202 set_cluster_dirty(&cc);
1203
1204 f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1205 f2fs_destroy_compress_ctx(&cc, false);
1206
1207 return first_index;
1208 }
1209
1210 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1211 {
1212 void *fsdata = NULL;
1213 struct page *pagep;
1214 int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1215 pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1216 log_cluster_size;
1217 int err;
1218
1219 err = f2fs_is_compressed_cluster(inode, start_idx);
1220 if (err < 0)
1221 return err;
1222
1223 /* truncate normal cluster */
1224 if (!err)
1225 return f2fs_do_truncate_blocks(inode, from, lock);
1226
1227 /* truncate compressed cluster */
1228 err = f2fs_prepare_compress_overwrite(inode, &pagep,
1229 start_idx, &fsdata);
1230
1231 /* should not be a normal cluster */
1232 f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1233
1234 if (err <= 0)
1235 return err;
1236
1237 if (err > 0) {
1238 struct page **rpages = fsdata;
1239 int cluster_size = F2FS_I(inode)->i_cluster_size;
1240 int i;
1241
1242 for (i = cluster_size - 1; i >= 0; i--) {
1243 struct folio *folio = page_folio(rpages[i]);
1244 loff_t start = folio->index << PAGE_SHIFT;
1245
1246 if (from <= start) {
1247 folio_zero_segment(folio, 0, folio_size(folio));
1248 } else {
1249 folio_zero_segment(folio, from - start,
1250 folio_size(folio));
1251 break;
1252 }
1253 }
1254
1255 f2fs_compress_write_end(inode, fsdata, start_idx, true);
1256 }
1257 return 0;
1258 }
1259
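/*
 * Write one compressed cluster: the first block slot becomes the
 * COMPRESS_ADDR cluster header, the valid_nr_cpages compressed pages are
 * written out-of-place, and block slots beyond the compressed payload
 * are invalidated. Any failure returns -EAGAIN so the caller can fall
 * back to f2fs_write_raw_pages().
 */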
1260 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1261 int *submitted,
1262 struct writeback_control *wbc,
1263 enum iostat_type io_type)
1264 {
1265 struct inode *inode = cc->inode;
1266 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1267 struct f2fs_inode_info *fi = F2FS_I(inode);
1268 struct f2fs_io_info fio = {
1269 .sbi = sbi,
1270 .ino = cc->inode->i_ino,
1271 .type = DATA,
1272 .op = REQ_OP_WRITE,
1273 .op_flags = wbc_to_write_flags(wbc),
1274 .old_blkaddr = NEW_ADDR,
1275 .page = NULL,
1276 .encrypted_page = NULL,
1277 .compressed_page = NULL,
1278 .io_type = io_type,
1279 .io_wbc = wbc,
1280 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1281 1 : 0,
1282 };
1283 struct folio *folio;
1284 struct dnode_of_data dn;
1285 struct node_info ni;
1286 struct compress_io_ctx *cic;
1287 pgoff_t start_idx = start_idx_of_cluster(cc);
1288 unsigned int last_index = cc->cluster_size - 1;
1289 loff_t psize;
1290 int i, err;
1291 bool quota_inode = IS_NOQUOTA(inode);
1292
1293 /* bypass data pages so that the kworker jobs can proceed */
1294 if (unlikely(f2fs_cp_error(sbi))) {
1295 mapping_set_error(inode->i_mapping, -EIO);
1296 goto out_free;
1297 }
1298
1299 if (quota_inode) {
1300 /*
1301 * We need to wait for node_write to avoid block allocation during
1302 * checkpoint. This can only happen to quota writes, which can cause
1303 * the discard race condition below.
1304 */
1305 f2fs_down_read(&sbi->node_write);
1306 } else if (!f2fs_trylock_op(sbi)) {
1307 goto out_free;
1308 }
1309
1310 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1311
1312 err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1313 if (err)
1314 goto out_unlock_op;
1315
1316 for (i = 0; i < cc->cluster_size; i++) {
1317 if (data_blkaddr(dn.inode, dn.node_page,
1318 dn.ofs_in_node + i) == NULL_ADDR)
1319 goto out_put_dnode;
1320 }
1321
1322 folio = page_folio(cc->rpages[last_index]);
1323 psize = folio_pos(folio) + folio_size(folio);
1324
1325 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1326 if (err)
1327 goto out_put_dnode;
1328
1329 fio.version = ni.version;
1330
1331 cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1332 if (!cic)
1333 goto out_put_dnode;
1334
1335 cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1336 cic->inode = inode;
1337 atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1338 cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1339 if (!cic->rpages)
1340 goto out_put_cic;
1341
1342 cic->nr_rpages = cc->cluster_size;
1343
1344 for (i = 0; i < cc->valid_nr_cpages; i++) {
1345 f2fs_set_compressed_page(cc->cpages[i], inode,
1346 page_folio(cc->rpages[i + 1])->index, cic);
1347 fio.compressed_page = cc->cpages[i];
1348
1349 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1350 dn.ofs_in_node + i + 1);
1351
1352 /* wait for GCed page writeback via META_MAPPING */
1353 f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1354
1355 if (fio.encrypted) {
1356 fio.page = cc->rpages[i + 1];
1357 err = f2fs_encrypt_one_page(&fio);
1358 if (err)
1359 goto out_destroy_crypt;
1360 cc->cpages[i] = fio.encrypted_page;
1361 }
1362 }
1363
1364 set_cluster_writeback(cc);
1365
1366 for (i = 0; i < cc->cluster_size; i++)
1367 cic->rpages[i] = cc->rpages[i];
1368
1369 for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1370 block_t blkaddr;
1371
1372 blkaddr = f2fs_data_blkaddr(&dn);
1373 fio.page = cc->rpages[i];
1374 fio.old_blkaddr = blkaddr;
1375
1376 /* cluster header */
1377 if (i == 0) {
1378 if (blkaddr == COMPRESS_ADDR)
1379 fio.compr_blocks++;
1380 if (__is_valid_data_blkaddr(blkaddr))
1381 f2fs_invalidate_blocks(sbi, blkaddr, 1);
1382 f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1383 goto unlock_continue;
1384 }
1385
1386 if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1387 fio.compr_blocks++;
1388
1389 if (i > cc->valid_nr_cpages) {
1390 if (__is_valid_data_blkaddr(blkaddr)) {
1391 f2fs_invalidate_blocks(sbi, blkaddr, 1);
1392 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1393 }
1394 goto unlock_continue;
1395 }
1396
1397 f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1398
1399 if (fio.encrypted)
1400 fio.encrypted_page = cc->cpages[i - 1];
1401 else
1402 fio.compressed_page = cc->cpages[i - 1];
1403
1404 cc->cpages[i - 1] = NULL;
1405 fio.submitted = 0;
1406 f2fs_outplace_write_data(&dn, &fio);
1407 if (unlikely(!fio.submitted)) {
1408 cancel_cluster_writeback(cc, cic, i);
1409
1410 /* To call fscrypt_finalize_bounce_page */
1411 i = cc->valid_nr_cpages;
1412 *submitted = 0;
1413 goto out_destroy_crypt;
1414 }
1415 (*submitted)++;
1416 unlock_continue:
1417 inode_dec_dirty_pages(cc->inode);
1418 unlock_page(fio.page);
1419 }
1420
1421 if (fio.compr_blocks)
1422 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1423 f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1424 add_compr_block_stat(inode, cc->valid_nr_cpages);
1425
1426 set_inode_flag(cc->inode, FI_APPEND_WRITE);
1427
1428 f2fs_put_dnode(&dn);
1429 if (quota_inode)
1430 f2fs_up_read(&sbi->node_write);
1431 else
1432 f2fs_unlock_op(sbi);
1433
1434 spin_lock(&fi->i_size_lock);
1435 if (fi->last_disk_size < psize)
1436 fi->last_disk_size = psize;
1437 spin_unlock(&fi->i_size_lock);
1438
1439 f2fs_put_rpages(cc);
1440 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1441 cc->cpages = NULL;
1442 f2fs_destroy_compress_ctx(cc, false);
1443 return 0;
1444
1445 out_destroy_crypt:
1446 page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1447
1448 for (--i; i >= 0; i--) {
1449 if (!cc->cpages[i])
1450 continue;
1451 fscrypt_finalize_bounce_page(&cc->cpages[i]);
1452 }
1453 out_put_cic:
1454 kmem_cache_free(cic_entry_slab, cic);
1455 out_put_dnode:
1456 f2fs_put_dnode(&dn);
1457 out_unlock_op:
1458 if (quota_inode)
1459 f2fs_up_read(&sbi->node_write);
1460 else
1461 f2fs_unlock_op(sbi);
1462 out_free:
1463 for (i = 0; i < cc->valid_nr_cpages; i++) {
1464 f2fs_compress_free_page(cc->cpages[i]);
1465 cc->cpages[i] = NULL;
1466 }
1467 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1468 cc->cpages = NULL;
1469 return -EAGAIN;
1470 }
1471
1472 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1473 {
1474 struct f2fs_sb_info *sbi = bio->bi_private;
1475 struct compress_io_ctx *cic =
1476 (struct compress_io_ctx *)page_private(page);
1477 enum count_type type = WB_DATA_TYPE(page,
1478 f2fs_is_compressed_page(page));
1479 int i;
1480
1481 if (unlikely(bio->bi_status))
1482 mapping_set_error(cic->inode->i_mapping, -EIO);
1483
1484 f2fs_compress_free_page(page);
1485
1486 dec_page_count(sbi, type);
1487
1488 if (atomic_dec_return(&cic->pending_pages))
1489 return;
1490
1491 for (i = 0; i < cic->nr_rpages; i++) {
1492 WARN_ON(!cic->rpages[i]);
1493 clear_page_private_gcing(cic->rpages[i]);
1494 end_page_writeback(cic->rpages[i]);
1495 }
1496
1497 page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1498 kmem_cache_free(cic_entry_slab, cic);
1499 }
1500
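/*
 * Write the cluster as plain (uncompressed) pages. If the cluster was
 * compressed on disk, the rewrite runs under f2fs_lock_op() so the
 * layout conversion is not torn by a checkpoint.
 */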
1501 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1502 int *submitted_p,
1503 struct writeback_control *wbc,
1504 enum iostat_type io_type)
1505 {
1506 struct address_space *mapping = cc->inode->i_mapping;
1507 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1508 int submitted, compr_blocks, i;
1509 int ret = 0;
1510
1511 compr_blocks = f2fs_compressed_blocks(cc);
1512
1513 for (i = 0; i < cc->cluster_size; i++) {
1514 if (!cc->rpages[i])
1515 continue;
1516
1517 redirty_page_for_writepage(wbc, cc->rpages[i]);
1518 unlock_page(cc->rpages[i]);
1519 }
1520
1521 if (compr_blocks < 0)
1522 return compr_blocks;
1523
1524 /* overwrite compressed cluster w/ normal cluster */
1525 if (compr_blocks > 0)
1526 f2fs_lock_op(sbi);
1527
1528 for (i = 0; i < cc->cluster_size; i++) {
1529 if (!cc->rpages[i])
1530 continue;
1531 retry_write:
1532 lock_page(cc->rpages[i]);
1533
1534 if (cc->rpages[i]->mapping != mapping) {
1535 continue_unlock:
1536 unlock_page(cc->rpages[i]);
1537 continue;
1538 }
1539
1540 if (!PageDirty(cc->rpages[i]))
1541 goto continue_unlock;
1542
1543 if (folio_test_writeback(page_folio(cc->rpages[i]))) {
1544 if (wbc->sync_mode == WB_SYNC_NONE)
1545 goto continue_unlock;
1546 f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
1547 }
1548
1549 if (!clear_page_dirty_for_io(cc->rpages[i]))
1550 goto continue_unlock;
1551
1552 submitted = 0;
1553 ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
1554 &submitted,
1555 NULL, NULL, wbc, io_type,
1556 compr_blocks, false);
1557 if (ret) {
1558 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1559 unlock_page(cc->rpages[i]);
1560 ret = 0;
1561 } else if (ret == -EAGAIN) {
1562 ret = 0;
1563 /*
1564 * for a quota file, just redirty the remaining
1565 * pages to avoid a deadlock caused by a cluster
1566 * update race with foreground operations.
1567 */
1568 if (IS_NOQUOTA(cc->inode))
1569 goto out;
1570 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1571 goto retry_write;
1572 }
1573 goto out;
1574 }
1575
1576 *submitted_p += submitted;
1577 }
1578
1579 out:
1580 if (compr_blocks > 0)
1581 f2fs_unlock_op(sbi);
1582
1583 f2fs_balance_fs(sbi, true);
1584 return ret;
1585 }
1586
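/*
 * Writeback entry point for a dirty cluster: try to compress it first;
 * if compression fails or does not help (-EAGAIN), fall back to writing
 * the raw pages.
 */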
1587 int f2fs_write_multi_pages(struct compress_ctx *cc,
1588 int *submitted,
1589 struct writeback_control *wbc,
1590 enum iostat_type io_type)
1591 {
1592 int err;
1593
1594 *submitted = 0;
1595 if (cluster_may_compress(cc)) {
1596 err = f2fs_compress_pages(cc);
1597 if (err == -EAGAIN) {
1598 add_compr_block_stat(cc->inode, cc->cluster_size);
1599 goto write;
1600 } else if (err) {
1601 f2fs_put_rpages_wbc(cc, wbc, true, 1);
1602 goto destroy_out;
1603 }
1604
1605 err = f2fs_write_compressed_pages(cc, submitted,
1606 wbc, io_type);
1607 if (!err)
1608 return 0;
1609 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1610 }
1611 write:
1612 f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1613
1614 err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1615 f2fs_put_rpages_wbc(cc, wbc, false, 0);
1616 destroy_out:
1617 f2fs_destroy_compress_ctx(cc, false);
1618 return err;
1619 }
1620
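/*
 * In low-memory mode the decompression buffers are allocated lazily at
 * decompress time rather than when the dic is created; otherwise they
 * are pre-allocated. The XOR selects exactly one of the two phases.
 */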
1621 static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1622 bool pre_alloc)
1623 {
1624 return pre_alloc ^ f2fs_low_mem_mode(sbi);
1625 }
1626
1627 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1628 bool pre_alloc)
1629 {
1630 const struct f2fs_compress_ops *cops =
1631 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1632 int i;
1633
1634 if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1635 return 0;
1636
1637 dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1638 if (!dic->tpages)
1639 return -ENOMEM;
1640
1641 for (i = 0; i < dic->cluster_size; i++) {
1642 if (dic->rpages[i]) {
1643 dic->tpages[i] = dic->rpages[i];
1644 continue;
1645 }
1646
1647 dic->tpages[i] = f2fs_compress_alloc_page();
1648 }
1649
1650 dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1651 if (!dic->rbuf)
1652 return -ENOMEM;
1653
1654 dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1655 if (!dic->cbuf)
1656 return -ENOMEM;
1657
1658 if (cops->init_decompress_ctx)
1659 return cops->init_decompress_ctx(dic);
1660
1661 return 0;
1662 }
1663
1664 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1665 bool bypass_destroy_callback, bool pre_alloc)
1666 {
1667 const struct f2fs_compress_ops *cops =
1668 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1669
1670 if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1671 return;
1672
1673 if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1674 cops->destroy_decompress_ctx(dic);
1675
1676 if (dic->cbuf)
1677 vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1678
1679 if (dic->rbuf)
1680 vm_unmap_ram(dic->rbuf, dic->cluster_size);
1681 }
1682
1683 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1684 bool bypass_destroy_callback);
1685
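/*
 * Allocate the decompress_io_ctx for one cluster read: rpages mirror the
 * pagecache pages, while cpages receive the on-disk compressed data
 * (indexed from start_idx + 1, past the COMPRESS_ADDR header). The
 * initial reference is dropped via f2fs_put_dic().
 */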
1686 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1687 {
1688 struct decompress_io_ctx *dic;
1689 pgoff_t start_idx = start_idx_of_cluster(cc);
1690 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1691 int i, ret;
1692
1693 dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1694 if (!dic)
1695 return ERR_PTR(-ENOMEM);
1696
1697 dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1698 if (!dic->rpages) {
1699 kmem_cache_free(dic_entry_slab, dic);
1700 return ERR_PTR(-ENOMEM);
1701 }
1702
1703 dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1704 dic->inode = cc->inode;
1705 atomic_set(&dic->remaining_pages, cc->nr_cpages);
1706 dic->cluster_idx = cc->cluster_idx;
1707 dic->cluster_size = cc->cluster_size;
1708 dic->log_cluster_size = cc->log_cluster_size;
1709 dic->nr_cpages = cc->nr_cpages;
1710 refcount_set(&dic->refcnt, 1);
1711 dic->failed = false;
1712 dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1713
1714 for (i = 0; i < dic->cluster_size; i++)
1715 dic->rpages[i] = cc->rpages[i];
1716 dic->nr_rpages = cc->cluster_size;
1717
1718 dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1719 if (!dic->cpages) {
1720 ret = -ENOMEM;
1721 goto out_free;
1722 }
1723
1724 for (i = 0; i < dic->nr_cpages; i++) {
1725 struct page *page;
1726
1727 page = f2fs_compress_alloc_page();
1728 f2fs_set_compressed_page(page, cc->inode,
1729 start_idx + i + 1, dic);
1730 dic->cpages[i] = page;
1731 }
1732
1733 ret = f2fs_prepare_decomp_mem(dic, true);
1734 if (ret)
1735 goto out_free;
1736
1737 return dic;
1738
1739 out_free:
1740 f2fs_free_dic(dic, true);
1741 return ERR_PTR(ret);
1742 }
1743
1744 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1745 bool bypass_destroy_callback)
1746 {
1747 int i;
1748
1749 f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1750
1751 if (dic->tpages) {
1752 for (i = 0; i < dic->cluster_size; i++) {
1753 if (dic->rpages[i])
1754 continue;
1755 if (!dic->tpages[i])
1756 continue;
1757 f2fs_compress_free_page(dic->tpages[i]);
1758 }
1759 page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1760 }
1761
1762 if (dic->cpages) {
1763 for (i = 0; i < dic->nr_cpages; i++) {
1764 if (!dic->cpages[i])
1765 continue;
1766 f2fs_compress_free_page(dic->cpages[i]);
1767 }
1768 page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1769 }
1770
1771 page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1772 kmem_cache_free(dic_entry_slab, dic);
1773 }
1774
1775 static void f2fs_late_free_dic(struct work_struct *work)
1776 {
1777 struct decompress_io_ctx *dic =
1778 container_of(work, struct decompress_io_ctx, free_work);
1779
1780 f2fs_free_dic(dic, false);
1781 }
1782
1783 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1784 {
1785 if (refcount_dec_and_test(&dic->refcnt)) {
1786 if (in_task) {
1787 f2fs_free_dic(dic, false);
1788 } else {
1789 INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1790 queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1791 &dic->free_work);
1792 }
1793 }
1794 }
1795
1796 static void f2fs_verify_cluster(struct work_struct *work)
1797 {
1798 struct decompress_io_ctx *dic =
1799 container_of(work, struct decompress_io_ctx, verity_work);
1800 int i;
1801
1802 /* Verify, update, and unlock the decompressed pages. */
1803 for (i = 0; i < dic->cluster_size; i++) {
1804 struct page *rpage = dic->rpages[i];
1805
1806 if (!rpage)
1807 continue;
1808
1809 if (fsverity_verify_page(rpage))
1810 SetPageUptodate(rpage);
1811 else
1812 ClearPageUptodate(rpage);
1813 unlock_page(rpage);
1814 }
1815
1816 f2fs_put_dic(dic, true);
1817 }
1818
1819 /*
1820 * This is called when a compressed cluster has been decompressed
1821 * (or failed to be read and/or decompressed).
1822 */
1823 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1824 bool in_task)
1825 {
1826 int i;
1827
1828 if (!failed && dic->need_verity) {
1829 /*
1830 * Note that to avoid deadlocks, the verity work can't be done
1831 * on the decompression workqueue. This is because verifying
1832 * the data pages can involve reading metadata pages from the
1833 * file, and these metadata pages may be compressed.
1834 */
1835 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1836 fsverity_enqueue_verify_work(&dic->verity_work);
1837 return;
1838 }
1839
1840 /* Update and unlock the cluster's pagecache pages. */
1841 for (i = 0; i < dic->cluster_size; i++) {
1842 struct page *rpage = dic->rpages[i];
1843
1844 if (!rpage)
1845 continue;
1846
1847 if (failed)
1848 ClearPageUptodate(rpage);
1849 else
1850 SetPageUptodate(rpage);
1851 unlock_page(rpage);
1852 }
1853
1854 /*
1855 * Release the reference to the decompress_io_ctx that was being held
1856 * for I/O completion.
1857 */
1858 f2fs_put_dic(dic, in_task);
1859 }
1860
1861 /*
1862 * Put a reference to a compressed page's decompress_io_ctx.
1863 *
1864 * This is called when the page is no longer needed and can be freed.
1865 */
1866 void f2fs_put_page_dic(struct page *page, bool in_task)
1867 {
1868 struct decompress_io_ctx *dic =
1869 (struct decompress_io_ctx *)page_private(page);
1870
1871 f2fs_put_dic(dic, in_task);
1872 }
1873
1874 /*
1875 * check whether cluster blocks are contiguous, and add extent cache entry
1876 * only if cluster blocks are logically and physically contiguous.
1877 */
1878 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
1879 unsigned int ofs_in_node)
1880 {
1881 bool compressed = data_blkaddr(dn->inode, dn->node_page,
1882 ofs_in_node) == COMPRESS_ADDR;
1883 int i = compressed ? 1 : 0;
1884 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1885 ofs_in_node + i);
1886
1887 for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1888 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1889 ofs_in_node + i);
1890
1891 if (!__is_valid_data_blkaddr(blkaddr))
1892 break;
1893 if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1894 return 0;
1895 }
1896
1897 return compressed ? i - 1 : i;
1898 }
1899
1900 const struct address_space_operations f2fs_compress_aops = {
1901 .release_folio = f2fs_release_folio,
1902 .invalidate_folio = f2fs_invalidate_folio,
1903 .migrate_folio = filemap_migrate_folio,
1904 };
1905
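/*
 * Compressed block cache ("compress_cache" mount option): raw compressed
 * blocks are kept in the page cache of a dedicated compress_inode,
 * indexed by block address, so re-reads of cached compressed clusters
 * can avoid device I/O.
 */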
1906 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1907 {
1908 return sbi->compress_inode->i_mapping;
1909 }
1910
1911 void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
1912 block_t blkaddr, unsigned int len)
1913 {
1914 if (!sbi->compress_inode)
1915 return;
1916 invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
1917 }
1918
1919 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1920 nid_t ino, block_t blkaddr)
1921 {
1922 struct page *cpage;
1923 int ret;
1924
1925 if (!test_opt(sbi, COMPRESS_CACHE))
1926 return;
1927
1928 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1929 return;
1930
1931 if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1932 return;
1933
1934 cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1935 if (cpage) {
1936 f2fs_put_page(cpage, 0);
1937 return;
1938 }
1939
1940 cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1941 if (!cpage)
1942 return;
1943
1944 ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1945 blkaddr, GFP_NOFS);
1946 if (ret) {
1947 f2fs_put_page(cpage, 0);
1948 return;
1949 }
1950
1951 set_page_private_data(cpage, ino);
1952
1953 memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1954 SetPageUptodate(cpage);
1955 f2fs_put_page(cpage, 1);
1956 }
1957
1958 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1959 block_t blkaddr)
1960 {
1961 struct page *cpage;
1962 bool hitted = false;
1963
1964 if (!test_opt(sbi, COMPRESS_CACHE))
1965 return false;
1966
1967 cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1968 blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1969 if (cpage) {
1970 if (PageUptodate(cpage)) {
1971 atomic_inc(&sbi->compress_page_hit);
1972 memcpy(page_address(page),
1973 page_address(cpage), PAGE_SIZE);
1974 hitted = true;
1975 }
1976 f2fs_put_page(cpage, 1);
1977 }
1978
1979 return hitted;
1980 }
1981
1982 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1983 {
1984 struct address_space *mapping = COMPRESS_MAPPING(sbi);
1985 struct folio_batch fbatch;
1986 pgoff_t index = 0;
1987 pgoff_t end = MAX_BLKADDR(sbi);
1988
1989 if (!mapping->nrpages)
1990 return;
1991
1992 folio_batch_init(&fbatch);
1993
1994 do {
1995 unsigned int nr, i;
1996
1997 nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
1998 if (!nr)
1999 break;
2000
2001 for (i = 0; i < nr; i++) {
2002 struct folio *folio = fbatch.folios[i];
2003
2004 folio_lock(folio);
2005 if (folio->mapping != mapping) {
2006 folio_unlock(folio);
2007 continue;
2008 }
2009
2010 if (ino != get_page_private_data(&folio->page)) {
2011 folio_unlock(folio);
2012 continue;
2013 }
2014
2015 generic_error_remove_folio(mapping, folio);
2016 folio_unlock(folio);
2017 }
2018 folio_batch_release(&fbatch);
2019 cond_resched();
2020 } while (index < end);
2021 }
2022
2023 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
2024 {
2025 struct inode *inode;
2026
2027 if (!test_opt(sbi, COMPRESS_CACHE))
2028 return 0;
2029
2030 inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
2031 if (IS_ERR(inode))
2032 return PTR_ERR(inode);
2033 sbi->compress_inode = inode;
2034
2035 sbi->compress_percent = COMPRESS_PERCENT;
2036 sbi->compress_watermark = COMPRESS_WATERMARK;
2037
2038 atomic_set(&sbi->compress_page_hit, 0);
2039
2040 return 0;
2041 }
2042
2043 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
2044 {
2045 if (!sbi->compress_inode)
2046 return;
2047 iput(sbi->compress_inode);
2048 sbi->compress_inode = NULL;
2049 }
2050
2051 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
2052 {
2053 dev_t dev = sbi->sb->s_bdev->bd_dev;
2054 char slab_name[35];
2055
2056 if (!f2fs_sb_has_compression(sbi))
2057 return 0;
2058
2059 sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
2060
2061 sbi->page_array_slab_size = sizeof(struct page *) <<
2062 F2FS_OPTION(sbi).compress_log_size;
2063
2064 sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
2065 sbi->page_array_slab_size);
2066 return sbi->page_array_slab ? 0 : -ENOMEM;
2067 }
2068
2069 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
2070 {
2071 kmem_cache_destroy(sbi->page_array_slab);
2072 }
2073
2074 int __init f2fs_init_compress_cache(void)
2075 {
2076 cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
2077 sizeof(struct compress_io_ctx));
2078 if (!cic_entry_slab)
2079 return -ENOMEM;
2080 dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
2081 sizeof(struct decompress_io_ctx));
2082 if (!dic_entry_slab)
2083 goto free_cic;
2084 return 0;
2085 free_cic:
2086 kmem_cache_destroy(cic_entry_slab);
2087 return -ENOMEM;
2088 }
2089
2090 void f2fs_destroy_compress_cache(void)
2091 {
2092 kmem_cache_destroy(dic_entry_slab);
2093 kmem_cache_destroy(cic_entry_slab);
2094 }
2095