1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/writeback.h>
11 #include <linux/backing-dev.h>
12 #include <linux/lzo.h>
13 #include <linux/lz4.h>
14 #include <linux/zstd.h>
15 
16 #include "f2fs.h"
17 #include "node.h"
18 #include <trace/events/f2fs.h>
19 
20 static struct kmem_cache *cic_entry_slab;
21 static struct kmem_cache *dic_entry_slab;
22 
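/*
 * Allocate a zeroed array of @nr page pointers. Cluster-sized arrays
 * come from the per-superblock page_array_slab; anything larger falls
 * back to f2fs_kzalloc(). page_array_free() below mirrors this split.
 */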
23 static void *page_array_alloc(struct inode *inode, int nr)
24 {
25 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
26 	unsigned int size = sizeof(struct page *) * nr;
27 
28 	if (likely(size <= sbi->page_array_slab_size))
29 		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
30 	return f2fs_kzalloc(sbi, size, GFP_NOFS);
31 }
32 
33 static void page_array_free(struct inode *inode, void *pages, int nr)
34 {
35 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
36 	unsigned int size = sizeof(struct page *) * nr;
37 
38 	if (!pages)
39 		return;
40 
41 	if (likely(size <= sbi->page_array_slab_size))
42 		kmem_cache_free(sbi->page_array_slab, pages);
43 	else
44 		kfree(pages);
45 }
46 
47 struct f2fs_compress_ops {
48 	int (*init_compress_ctx)(struct compress_ctx *cc);
49 	void (*destroy_compress_ctx)(struct compress_ctx *cc);
50 	int (*compress_pages)(struct compress_ctx *cc);
51 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
52 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
53 	int (*decompress_pages)(struct decompress_io_ctx *dic);
54 };
55 
56 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
57 {
58 	return index & (cc->cluster_size - 1);
59 }
60 
61 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
62 {
63 	return index >> cc->log_cluster_size;
64 }
65 
66 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
67 {
68 	return cc->cluster_idx << cc->log_cluster_size;
69 }
70 
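/*
 * A compressed page stores a pointer to its (de)compress_io_ctx in
 * page->private, and both context structures begin with
 * F2FS_COMPRESSED_PAGE_MAGIC. The other private users (atomic/dummy
 * writes, IO-trace pids) are ruled out before the magic is trusted.
 */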
71 bool f2fs_is_compressed_page(struct page *page)
72 {
73 	if (!PagePrivate(page))
74 		return false;
75 	if (!page_private(page))
76 		return false;
77 	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
78 		return false;
79 	/*
80 	 * page->private may hold a pid set by IO tracing;
81 	 * any value below pid_max identifies such a traced page.
82 	 */
83 	if (IS_IO_TRACED_PAGE(page))
84 		return false;
85 
86 	f2fs_bug_on(F2FS_M_SB(page->mapping),
87 		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
88 	return true;
89 }
90 
91 static void f2fs_set_compressed_page(struct page *page,
92 		struct inode *inode, pgoff_t index, void *data)
93 {
94 	SetPagePrivate(page);
95 	set_page_private(page, (unsigned long)data);
96 
97 	/* i_crypto_info and iv index */
98 	page->index = index;
99 	page->mapping = inode->i_mapping;
100 }
101 
102 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
103 {
104 	int i;
105 
106 	for (i = 0; i < len; i++) {
107 		if (!cc->rpages[i])
108 			continue;
109 		if (unlock)
110 			unlock_page(cc->rpages[i]);
111 		else
112 			put_page(cc->rpages[i]);
113 	}
114 }
115 
116 static void f2fs_put_rpages(struct compress_ctx *cc)
117 {
118 	f2fs_drop_rpages(cc, cc->cluster_size, false);
119 }
120 
121 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
122 {
123 	f2fs_drop_rpages(cc, len, true);
124 }
125 
126 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
127 		struct writeback_control *wbc, bool redirty, int unlock)
128 {
129 	unsigned int i;
130 
131 	for (i = 0; i < cc->cluster_size; i++) {
132 		if (!cc->rpages[i])
133 			continue;
134 		if (redirty)
135 			redirty_page_for_writepage(wbc, cc->rpages[i]);
136 		f2fs_put_page(cc->rpages[i], unlock);
137 	}
138 }
139 
140 struct page *f2fs_compress_control_page(struct page *page)
141 {
142 	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
143 }
144 
145 int f2fs_init_compress_ctx(struct compress_ctx *cc)
146 {
147 	if (cc->rpages)
148 		return 0;
149 
150 	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
151 	return cc->rpages ? 0 : -ENOMEM;
152 }
153 
154 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
155 {
156 	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
157 	cc->rpages = NULL;
158 	cc->nr_rpages = 0;
159 	cc->nr_cpages = 0;
160 	if (!reuse)
161 		cc->cluster_idx = NULL_CLUSTER;
162 }
163 
164 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
165 {
166 	unsigned int cluster_ofs;
167 
168 	if (!f2fs_cluster_can_merge_page(cc, page->index))
169 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
170 
171 	cluster_ofs = offset_in_cluster(cc, page->index);
172 	cc->rpages[cluster_ofs] = page;
173 	cc->nr_rpages++;
174 	cc->cluster_idx = cluster_idx(cc, page->index);
175 }
176 
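/*
 * Per-algorithm backends implementing the f2fs_compress_ops hooks
 * above. The decompress-context hooks are optional; of the backends
 * below, only zstd provides them.
 */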
177 #ifdef CONFIG_F2FS_FS_LZO
178 static int lzo_init_compress_ctx(struct compress_ctx *cc)
179 {
180 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
181 				LZO1X_MEM_COMPRESS, GFP_NOFS);
182 	if (!cc->private)
183 		return -ENOMEM;
184 
185 	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
186 	return 0;
187 }
188 
189 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
190 {
191 	kvfree(cc->private);
192 	cc->private = NULL;
193 }
194 
195 static int lzo_compress_pages(struct compress_ctx *cc)
196 {
197 	int ret;
198 
199 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
200 					&cc->clen, cc->private);
201 	if (ret != LZO_E_OK) {
202 		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
203 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
204 		return -EIO;
205 	}
206 	return 0;
207 }
208 
209 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
210 {
211 	int ret;
212 
213 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
214 						dic->rbuf, &dic->rlen);
215 	if (ret != LZO_E_OK) {
216 		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
217 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
218 		return -EIO;
219 	}
220 
221 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
222 		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
223 					"expected:%lu\n", KERN_ERR,
224 					F2FS_I_SB(dic->inode)->sb->s_id,
225 					dic->rlen,
226 					PAGE_SIZE << dic->log_cluster_size);
227 		return -EIO;
228 	}
229 	return 0;
230 }
231 
232 static const struct f2fs_compress_ops f2fs_lzo_ops = {
233 	.init_compress_ctx	= lzo_init_compress_ctx,
234 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
235 	.compress_pages		= lzo_compress_pages,
236 	.decompress_pages	= lzo_decompress_pages,
237 };
238 #endif
239 
240 #ifdef CONFIG_F2FS_FS_LZ4
241 static int lz4_init_compress_ctx(struct compress_ctx *cc)
242 {
243 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
244 				LZ4_MEM_COMPRESS, GFP_NOFS);
245 	if (!cc->private)
246 		return -ENOMEM;
247 
248 	/*
249 	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
250 	 * the worst case, because the lz4 compressor itself respects the
251 	 * given output budget.
252 	 */
253 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
254 	return 0;
255 }
256 
257 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
258 {
259 	kvfree(cc->private);
260 	cc->private = NULL;
261 }
262 
263 static int lz4_compress_pages(struct compress_ctx *cc)
264 {
265 	int len;
266 
267 	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
268 						cc->clen, cc->private);
269 	if (!len)
270 		return -EAGAIN;
271 
272 	cc->clen = len;
273 	return 0;
274 }
275 
276 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
277 {
278 	int ret;
279 
280 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
281 						dic->clen, dic->rlen);
282 	if (ret < 0) {
283 		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
284 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
285 		return -EIO;
286 	}
287 
288 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
289 		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
290 					"expected:%lu\n", KERN_ERR,
291 					F2FS_I_SB(dic->inode)->sb->s_id,
292 					ret,
293 					PAGE_SIZE << dic->log_cluster_size);
294 		return -EIO;
295 	}
296 	return 0;
297 }
298 
299 static const struct f2fs_compress_ops f2fs_lz4_ops = {
300 	.init_compress_ctx	= lz4_init_compress_ctx,
301 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
302 	.compress_pages		= lz4_compress_pages,
303 	.decompress_pages	= lz4_decompress_pages,
304 };
305 #endif
306 
307 #ifdef CONFIG_F2FS_FS_ZSTD
308 #define F2FS_ZSTD_DEFAULT_CLEVEL	1
309 
310 static int zstd_init_compress_ctx(struct compress_ctx *cc)
311 {
312 	ZSTD_parameters params;
313 	ZSTD_CStream *stream;
314 	void *workspace;
315 	unsigned int workspace_size;
316 
317 	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
318 	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
319 
320 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
321 					workspace_size, GFP_NOFS);
322 	if (!workspace)
323 		return -ENOMEM;
324 
325 	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
326 	if (!stream) {
327 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
328 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
329 				__func__);
330 		kvfree(workspace);
331 		return -EIO;
332 	}
333 
334 	cc->private = workspace;
335 	cc->private2 = stream;
336 
337 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
338 	return 0;
339 }
340 
341 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
342 {
343 	kvfree(cc->private);
344 	cc->private = NULL;
345 	cc->private2 = NULL;
346 }
347 
348 static int zstd_compress_pages(struct compress_ctx *cc)
349 {
350 	ZSTD_CStream *stream = cc->private2;
351 	ZSTD_inBuffer inbuf;
352 	ZSTD_outBuffer outbuf;
353 	int src_size = cc->rlen;
354 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
355 	int ret;
356 
357 	inbuf.pos = 0;
358 	inbuf.src = cc->rbuf;
359 	inbuf.size = src_size;
360 
361 	outbuf.pos = 0;
362 	outbuf.dst = cc->cbuf->cdata;
363 	outbuf.size = dst_size;
364 
365 	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
366 	if (ZSTD_isError(ret)) {
367 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
368 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
369 				__func__, ZSTD_getErrorCode(ret));
370 		return -EIO;
371 	}
372 
373 	ret = ZSTD_endStream(stream, &outbuf);
374 	if (ZSTD_isError(ret)) {
375 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
376 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
377 				__func__, ZSTD_getErrorCode(ret));
378 		return -EIO;
379 	}
380 
381 	/*
382 	 * A nonzero return means compressed data remains in the
383 	 * intermediate buffer because cbuf->cdata ran out of space.
384 	 */
385 	if (ret)
386 		return -EAGAIN;
387 
388 	cc->clen = outbuf.pos;
389 	return 0;
390 }
391 
392 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
393 {
394 	ZSTD_DStream *stream;
395 	void *workspace;
396 	unsigned int workspace_size;
397 	unsigned int max_window_size =
398 			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
399 
400 	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
401 
402 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
403 					workspace_size, GFP_NOFS);
404 	if (!workspace)
405 		return -ENOMEM;
406 
407 	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
408 	if (!stream) {
409 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
410 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
411 				__func__);
412 		kvfree(workspace);
413 		return -EIO;
414 	}
415 
416 	dic->private = workspace;
417 	dic->private2 = stream;
418 
419 	return 0;
420 }
421 
422 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
423 {
424 	kvfree(dic->private);
425 	dic->private = NULL;
426 	dic->private2 = NULL;
427 }
428 
429 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
430 {
431 	ZSTD_DStream *stream = dic->private2;
432 	ZSTD_inBuffer inbuf;
433 	ZSTD_outBuffer outbuf;
434 	int ret;
435 
436 	inbuf.pos = 0;
437 	inbuf.src = dic->cbuf->cdata;
438 	inbuf.size = dic->clen;
439 
440 	outbuf.pos = 0;
441 	outbuf.dst = dic->rbuf;
442 	outbuf.size = dic->rlen;
443 
444 	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
445 	if (ZSTD_isError(ret)) {
446 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
447 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
448 				__func__, ZSTD_getErrorCode(ret));
449 		return -EIO;
450 	}
451 
452 	if (dic->rlen != outbuf.pos) {
453 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
454 				"expected:%lu\n", KERN_ERR,
455 				F2FS_I_SB(dic->inode)->sb->s_id,
456 				__func__, outbuf.pos,
457 				PAGE_SIZE << dic->log_cluster_size);
458 		return -EIO;
459 	}
460 
461 	return 0;
462 }
463 
464 static const struct f2fs_compress_ops f2fs_zstd_ops = {
465 	.init_compress_ctx	= zstd_init_compress_ctx,
466 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
467 	.compress_pages		= zstd_compress_pages,
468 	.init_decompress_ctx	= zstd_init_decompress_ctx,
469 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
470 	.decompress_pages	= zstd_decompress_pages,
471 };
472 #endif
473 
474 #ifdef CONFIG_F2FS_FS_LZO
475 #ifdef CONFIG_F2FS_FS_LZORLE
476 static int lzorle_compress_pages(struct compress_ctx *cc)
477 {
478 	int ret;
479 
480 	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
481 					&cc->clen, cc->private);
482 	if (ret != LZO_E_OK) {
483 		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
484 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
485 		return -EIO;
486 	}
487 	return 0;
488 }
489 
490 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
491 	.init_compress_ctx	= lzo_init_compress_ctx,
492 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
493 	.compress_pages		= lzorle_compress_pages,
494 	.decompress_pages	= lzo_decompress_pages,
495 };
496 #endif
497 #endif
498 
499 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
500 #ifdef CONFIG_F2FS_FS_LZO
501 	&f2fs_lzo_ops,
502 #else
503 	NULL,
504 #endif
505 #ifdef CONFIG_F2FS_FS_LZ4
506 	&f2fs_lz4_ops,
507 #else
508 	NULL,
509 #endif
510 #ifdef CONFIG_F2FS_FS_ZSTD
511 	&f2fs_zstd_ops,
512 #else
513 	NULL,
514 #endif
515 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
516 	&f2fs_lzorle_ops,
517 #else
518 	NULL,
519 #endif
520 };
521 
522 bool f2fs_is_compress_backend_ready(struct inode *inode)
523 {
524 	if (!f2fs_compressed_file(inode))
525 		return true;
526 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
527 }
528 
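/*
 * Work pages for compression and decompression are served from a
 * shared mempool (sized by num_compress_pages), so a reserve of pages
 * stays available to writeback even under memory pressure.
 */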
529 static mempool_t *compress_page_pool;
530 static unsigned int num_compress_pages = 512;
531 module_param(num_compress_pages, uint, 0444);
532 MODULE_PARM_DESC(num_compress_pages,
533 		"Number of intermediate compress pages to preallocate");
534 
535 int f2fs_init_compress_mempool(void)
536 {
537 	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
538 	if (!compress_page_pool)
539 		return -ENOMEM;
540 
541 	return 0;
542 }
543 
544 void f2fs_destroy_compress_mempool(void)
545 {
546 	mempool_destroy(compress_page_pool);
547 }
548 
549 static struct page *f2fs_compress_alloc_page(void)
550 {
551 	struct page *page;
552 
553 	page = mempool_alloc(compress_page_pool, GFP_NOFS);
554 	lock_page(page);
555 
556 	return page;
557 }
558 
559 static void f2fs_compress_free_page(struct page *page)
560 {
561 	if (!page)
562 		return;
563 	set_page_private(page, (unsigned long)NULL);
564 	ClearPagePrivate(page);
565 	page->mapping = NULL;
566 	unlock_page(page);
567 	mempool_free(page, compress_page_pool);
568 }
569 
570 #define MAX_VMAP_RETRIES	3
571 
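/*
 * vm_map_ram() may fail transiently when vmap space is fragmented;
 * flushing stale aliases with vm_unmap_aliases() and retrying up to
 * MAX_VMAP_RETRIES times usually recovers.
 */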
572 static void *f2fs_vmap(struct page **pages, unsigned int count)
573 {
574 	int i;
575 	void *buf = NULL;
576 
577 	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
578 		buf = vm_map_ram(pages, count, -1);
579 		if (buf)
580 			break;
581 		vm_unmap_aliases();
582 	}
583 	return buf;
584 }
585 
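/*
 * Compress one cluster: map rpages/cpages into contiguous buffers, run
 * the backend, and require the result to save at least one block
 * (cc->clen <= max_len); otherwise bail out with -EAGAIN so the caller
 * falls back to writing raw pages. Unneeded tail cpages are trimmed.
 */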
586 static int f2fs_compress_pages(struct compress_ctx *cc)
587 {
588 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
589 	const struct f2fs_compress_ops *cops =
590 				f2fs_cops[fi->i_compress_algorithm];
591 	unsigned int max_len, new_nr_cpages;
592 	struct page **new_cpages;
593 	int i, ret;
594 
595 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
596 				cc->cluster_size, fi->i_compress_algorithm);
597 
598 	if (cops->init_compress_ctx) {
599 		ret = cops->init_compress_ctx(cc);
600 		if (ret)
601 			goto out;
602 	}
603 
604 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
605 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
606 
607 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
608 	if (!cc->cpages) {
609 		ret = -ENOMEM;
610 		goto destroy_compress_ctx;
611 	}
612 
613 	for (i = 0; i < cc->nr_cpages; i++) {
614 		cc->cpages[i] = f2fs_compress_alloc_page();
615 		if (!cc->cpages[i]) {
616 			ret = -ENOMEM;
617 			goto out_free_cpages;
618 		}
619 	}
620 
621 	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
622 	if (!cc->rbuf) {
623 		ret = -ENOMEM;
624 		goto out_free_cpages;
625 	}
626 
627 	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
628 	if (!cc->cbuf) {
629 		ret = -ENOMEM;
630 		goto out_vunmap_rbuf;
631 	}
632 
633 	ret = cops->compress_pages(cc);
634 	if (ret)
635 		goto out_vunmap_cbuf;
636 
637 	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
638 
639 	if (cc->clen > max_len) {
640 		ret = -EAGAIN;
641 		goto out_vunmap_cbuf;
642 	}
643 
644 	cc->cbuf->clen = cpu_to_le32(cc->clen);
645 
646 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
647 		cc->cbuf->reserved[i] = cpu_to_le32(0);
648 
649 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
650 
651 	/* Now we're going to cut unnecessary tail pages */
652 	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
653 	if (!new_cpages) {
654 		ret = -ENOMEM;
655 		goto out_vunmap_cbuf;
656 	}
657 
658 	/* zero out any unused part of the last page */
659 	memset(&cc->cbuf->cdata[cc->clen], 0,
660 			(new_nr_cpages * PAGE_SIZE) -
661 			(cc->clen + COMPRESS_HEADER_SIZE));
662 
663 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
664 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
665 
666 	for (i = 0; i < cc->nr_cpages; i++) {
667 		if (i < new_nr_cpages) {
668 			new_cpages[i] = cc->cpages[i];
669 			continue;
670 		}
671 		f2fs_compress_free_page(cc->cpages[i]);
672 		cc->cpages[i] = NULL;
673 	}
674 
675 	if (cops->destroy_compress_ctx)
676 		cops->destroy_compress_ctx(cc);
677 
678 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
679 	cc->cpages = new_cpages;
680 	cc->nr_cpages = new_nr_cpages;
681 
682 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
683 							cc->clen, ret);
684 	return 0;
685 
686 out_vunmap_cbuf:
687 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
688 out_vunmap_rbuf:
689 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
690 out_free_cpages:
691 	for (i = 0; i < cc->nr_cpages; i++) {
692 		if (cc->cpages[i])
693 			f2fs_compress_free_page(cc->cpages[i]);
694 	}
695 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
696 	cc->cpages = NULL;
697 destroy_compress_ctx:
698 	if (cops->destroy_compress_ctx)
699 		cops->destroy_compress_ctx(cc);
700 out:
701 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
702 							cc->clen, ret);
703 	return ret;
704 }
705 
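/*
 * Read-completion handler: invoked once per compressed page of the
 * cluster; the call that drops pending_pages to zero performs the
 * actual decompression into tpages, which alias rpages wherever the
 * caller supplied them.
 */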
706 void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
707 {
708 	struct decompress_io_ctx *dic =
709 			(struct decompress_io_ctx *)page_private(page);
710 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
711 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
712 	const struct f2fs_compress_ops *cops =
713 			f2fs_cops[fi->i_compress_algorithm];
714 	int ret;
715 	int i;
716 
717 	dec_page_count(sbi, F2FS_RD_DATA);
718 
719 	if (bio->bi_status || PageError(page))
720 		dic->failed = true;
721 
722 	if (atomic_dec_return(&dic->pending_pages))
723 		return;
724 
725 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
726 				dic->cluster_size, fi->i_compress_algorithm);
727 
728 	/* submit partial compressed pages */
729 	if (dic->failed) {
730 		ret = -EIO;
731 		goto out_free_dic;
732 	}
733 
734 	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
735 	if (!dic->tpages) {
736 		ret = -ENOMEM;
737 		goto out_free_dic;
738 	}
739 
740 	for (i = 0; i < dic->cluster_size; i++) {
741 		if (dic->rpages[i]) {
742 			dic->tpages[i] = dic->rpages[i];
743 			continue;
744 		}
745 
746 		dic->tpages[i] = f2fs_compress_alloc_page();
747 		if (!dic->tpages[i]) {
748 			ret = -ENOMEM;
749 			goto out_free_dic;
750 		}
751 	}
752 
753 	if (cops->init_decompress_ctx) {
754 		ret = cops->init_decompress_ctx(dic);
755 		if (ret)
756 			goto out_free_dic;
757 	}
758 
759 	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
760 	if (!dic->rbuf) {
761 		ret = -ENOMEM;
762 		goto destroy_decompress_ctx;
763 	}
764 
765 	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
766 	if (!dic->cbuf) {
767 		ret = -ENOMEM;
768 		goto out_vunmap_rbuf;
769 	}
770 
771 	dic->clen = le32_to_cpu(dic->cbuf->clen);
772 	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
773 
774 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
775 		ret = -EFSCORRUPTED;
776 		goto out_vunmap_cbuf;
777 	}
778 
779 	ret = cops->decompress_pages(dic);
780 
781 out_vunmap_cbuf:
782 	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
783 out_vunmap_rbuf:
784 	vm_unmap_ram(dic->rbuf, dic->cluster_size);
785 destroy_decompress_ctx:
786 	if (cops->destroy_decompress_ctx)
787 		cops->destroy_decompress_ctx(dic);
788 out_free_dic:
789 	if (!verity)
790 		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
791 								ret, false);
792 
793 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
794 							dic->clen, ret);
795 	if (!verity)
796 		f2fs_free_dic(dic);
797 }
798 
799 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
800 {
801 	if (cc->cluster_idx == NULL_CLUSTER)
802 		return true;
803 	return cc->cluster_idx == cluster_idx(cc, index);
804 }
805 
806 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
807 {
808 	return cc->nr_rpages == 0;
809 }
810 
811 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
812 {
813 	return cc->cluster_size == cc->nr_rpages;
814 }
815 
816 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
817 {
818 	if (f2fs_cluster_is_empty(cc))
819 		return true;
820 	return is_page_in_cluster(cc, index);
821 }
822 
823 static bool __cluster_may_compress(struct compress_ctx *cc)
824 {
825 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
826 	loff_t i_size = i_size_read(cc->inode);
827 	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
828 	int i;
829 
830 	for (i = 0; i < cc->cluster_size; i++) {
831 		struct page *page = cc->rpages[i];
832 
833 		f2fs_bug_on(sbi, !page);
834 
835 		if (unlikely(f2fs_cp_error(sbi)))
836 			return false;
837 		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
838 			return false;
839 
840 		/* beyond EOF */
841 		if (page->index >= nr_pages)
842 			return false;
843 	}
844 	return true;
845 }
846 
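/*
 * Count blocks of the cluster at cc->cluster_idx. A cluster is
 * compressed iff its first slot holds COMPRESS_ADDR; @compr selects
 * whether to count only written blocks or everything that is not a
 * hole. Returns 0 for a non-compressed or absent cluster.
 */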
847 static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
848 {
849 	struct dnode_of_data dn;
850 	int ret;
851 
852 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
853 	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
854 							LOOKUP_NODE);
855 	if (ret) {
856 		if (ret == -ENOENT)
857 			ret = 0;
858 		goto fail;
859 	}
860 
861 	if (dn.data_blkaddr == COMPRESS_ADDR) {
862 		int i;
863 
864 		ret = 1;
865 		for (i = 1; i < cc->cluster_size; i++) {
866 			block_t blkaddr;
867 
868 			blkaddr = data_blkaddr(dn.inode,
869 					dn.node_page, dn.ofs_in_node + i);
870 			if (compr) {
871 				if (__is_valid_data_blkaddr(blkaddr))
872 					ret++;
873 			} else {
874 				if (blkaddr != NULL_ADDR)
875 					ret++;
876 			}
877 		}
878 	}
879 fail:
880 	f2fs_put_dnode(&dn);
881 	return ret;
882 }
883 
884 /* return # of compressed blocks in compressed cluster */
885 static int f2fs_compressed_blocks(struct compress_ctx *cc)
886 {
887 	return __f2fs_cluster_blocks(cc, true);
888 }
889 
890 /* return # of valid blocks in compressed cluster */
891 static int f2fs_cluster_blocks(struct compress_ctx *cc)
892 {
893 	return __f2fs_cluster_blocks(cc, false);
894 }
895 
896 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
897 {
898 	struct compress_ctx cc = {
899 		.inode = inode,
900 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
901 		.cluster_size = F2FS_I(inode)->i_cluster_size,
902 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
903 	};
904 
905 	return f2fs_cluster_blocks(&cc);
906 }
907 
908 static bool cluster_may_compress(struct compress_ctx *cc)
909 {
910 	if (!f2fs_compressed_file(cc->inode))
911 		return false;
912 	if (f2fs_is_atomic_file(cc->inode))
913 		return false;
914 	if (f2fs_is_mmap_file(cc->inode))
915 		return false;
916 	if (!f2fs_cluster_is_full(cc))
917 		return false;
918 	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
919 		return false;
920 	return __cluster_may_compress(cc);
921 }
922 
923 static void set_cluster_writeback(struct compress_ctx *cc)
924 {
925 	int i;
926 
927 	for (i = 0; i < cc->cluster_size; i++) {
928 		if (cc->rpages[i])
929 			set_page_writeback(cc->rpages[i]);
930 	}
931 }
932 
933 static void set_cluster_dirty(struct compress_ctx *cc)
934 {
935 	int i;
936 
937 	for (i = 0; i < cc->cluster_size; i++)
938 		if (cc->rpages[i])
939 			set_page_dirty(cc->rpages[i]);
940 }
941 
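/*
 * Pin every page of the cluster being overwritten and bring it up to
 * date, retrying from scratch if a page gets truncated in the
 * meantime. On success the locked rpages are handed back through
 * *fsdata for f2fs_compress_write_end().
 */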
942 static int prepare_compress_overwrite(struct compress_ctx *cc,
943 		struct page **pagep, pgoff_t index, void **fsdata)
944 {
945 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
946 	struct address_space *mapping = cc->inode->i_mapping;
947 	struct page *page;
948 	struct dnode_of_data dn;
949 	sector_t last_block_in_bio;
950 	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
951 	pgoff_t start_idx = start_idx_of_cluster(cc);
952 	int i, ret;
953 	bool prealloc;
954 
955 retry:
956 	ret = f2fs_cluster_blocks(cc);
957 	if (ret <= 0)
958 		return ret;
959 
960 	/* compressed case */
961 	prealloc = (ret < cc->cluster_size);
962 
963 	ret = f2fs_init_compress_ctx(cc);
964 	if (ret)
965 		return ret;
966 
967 	/* keep page reference to avoid page reclaim */
968 	for (i = 0; i < cc->cluster_size; i++) {
969 		page = f2fs_pagecache_get_page(mapping, start_idx + i,
970 							fgp_flag, GFP_NOFS);
971 		if (!page) {
972 			ret = -ENOMEM;
973 			goto unlock_pages;
974 		}
975 
976 		if (PageUptodate(page))
977 			f2fs_put_page(page, 1);
978 		else
979 			f2fs_compress_ctx_add_page(cc, page);
980 	}
981 
982 	if (!f2fs_cluster_is_empty(cc)) {
983 		struct bio *bio = NULL;
984 
985 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
986 					&last_block_in_bio, false, true);
987 		f2fs_put_rpages(cc);
988 		f2fs_destroy_compress_ctx(cc, true);
989 		if (ret)
990 			goto out;
991 		if (bio)
992 			f2fs_submit_bio(sbi, bio, DATA);
993 
994 		ret = f2fs_init_compress_ctx(cc);
995 		if (ret)
996 			goto out;
997 	}
998 
999 	for (i = 0; i < cc->cluster_size; i++) {
1000 		f2fs_bug_on(sbi, cc->rpages[i]);
1001 
1002 		page = find_lock_page(mapping, start_idx + i);
1003 		if (!page) {
1004 			/* page can be truncated */
1005 			goto release_and_retry;
1006 		}
1007 
1008 		f2fs_wait_on_page_writeback(page, DATA, true, true);
1009 		f2fs_compress_ctx_add_page(cc, page);
1010 
1011 		if (!PageUptodate(page)) {
1012 release_and_retry:
1013 			f2fs_put_rpages(cc);
1014 			f2fs_unlock_rpages(cc, i + 1);
1015 			f2fs_destroy_compress_ctx(cc, true);
1016 			goto retry;
1017 		}
1018 	}
1019 
1020 	if (prealloc) {
1021 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
1022 
1023 		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1024 
1025 		for (i = cc->cluster_size - 1; i > 0; i--) {
1026 			ret = f2fs_get_block(&dn, start_idx + i);
1027 			if (ret) {
1028 				i = cc->cluster_size;
1029 				break;
1030 			}
1031 
1032 			if (dn.data_blkaddr != NEW_ADDR)
1033 				break;
1034 		}
1035 
1036 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
1037 	}
1038 
1039 	if (likely(!ret)) {
1040 		*fsdata = cc->rpages;
1041 		*pagep = cc->rpages[offset_in_cluster(cc, index)];
1042 		return cc->cluster_size;
1043 	}
1044 
1045 unlock_pages:
1046 	f2fs_put_rpages(cc);
1047 	f2fs_unlock_rpages(cc, i);
1048 	f2fs_destroy_compress_ctx(cc, true);
1049 out:
1050 	return ret;
1051 }
1052 
1053 int f2fs_prepare_compress_overwrite(struct inode *inode,
1054 		struct page **pagep, pgoff_t index, void **fsdata)
1055 {
1056 	struct compress_ctx cc = {
1057 		.inode = inode,
1058 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1059 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1060 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1061 		.rpages = NULL,
1062 		.nr_rpages = 0,
1063 	};
1064 
1065 	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1066 }
1067 
1068 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1069 					pgoff_t index, unsigned copied)
1070 
1071 {
1072 	struct compress_ctx cc = {
1073 		.inode = inode,
1074 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1075 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1076 		.rpages = fsdata,
1077 	};
1078 	bool first_index = (index == cc.rpages[0]->index);
1079 
1080 	if (copied)
1081 		set_cluster_dirty(&cc);
1082 
1083 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1084 	f2fs_destroy_compress_ctx(&cc, false);
1085 
1086 	return first_index;
1087 }
1088 
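/*
 * A partial truncation cannot free individual blocks of a compressed
 * cluster: load the whole cluster, zero everything from @from onward,
 * and dirty the pages so the cluster is re-compressed at writeback.
 */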
1089 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1090 {
1091 	void *fsdata = NULL;
1092 	struct page *pagep;
1093 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1094 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1095 							log_cluster_size;
1096 	int err;
1097 
1098 	err = f2fs_is_compressed_cluster(inode, start_idx);
1099 	if (err < 0)
1100 		return err;
1101 
1102 	/* truncate normal cluster */
1103 	if (!err)
1104 		return f2fs_do_truncate_blocks(inode, from, lock);
1105 
1106 	/* truncate compressed cluster */
1107 	err = f2fs_prepare_compress_overwrite(inode, &pagep,
1108 						start_idx, &fsdata);
1109 
1110 	/* should not be a normal cluster */
1111 	f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1112 
1113 	if (err <= 0)
1114 		return err;
1115 
1116 	if (err > 0) {
1117 		struct page **rpages = fsdata;
1118 		int cluster_size = F2FS_I(inode)->i_cluster_size;
1119 		int i;
1120 
1121 		for (i = cluster_size - 1; i >= 0; i--) {
1122 			loff_t start = rpages[i]->index << PAGE_SHIFT;
1123 
1124 			if (from <= start) {
1125 				zero_user_segment(rpages[i], 0, PAGE_SIZE);
1126 			} else {
1127 				zero_user_segment(rpages[i], from - start,
1128 								PAGE_SIZE);
1129 				break;
1130 			}
1131 		}
1132 
1133 		f2fs_compress_write_end(inode, fsdata, start_idx, true);
1134 	}
1135 	return 0;
1136 }
1137 
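/*
 * Write out one compressed cluster: slot 0 on disk is set to the
 * COMPRESS_ADDR header, cpages are written out of place, and leftover
 * slots are released. Returns -EAGAIN to make the caller fall back to
 * f2fs_write_raw_pages().
 */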
1138 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1139 					int *submitted,
1140 					struct writeback_control *wbc,
1141 					enum iostat_type io_type)
1142 {
1143 	struct inode *inode = cc->inode;
1144 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1145 	struct f2fs_inode_info *fi = F2FS_I(inode);
1146 	struct f2fs_io_info fio = {
1147 		.sbi = sbi,
1148 		.ino = cc->inode->i_ino,
1149 		.type = DATA,
1150 		.op = REQ_OP_WRITE,
1151 		.op_flags = wbc_to_write_flags(wbc),
1152 		.old_blkaddr = NEW_ADDR,
1153 		.page = NULL,
1154 		.encrypted_page = NULL,
1155 		.compressed_page = NULL,
1156 		.submitted = false,
1157 		.io_type = io_type,
1158 		.io_wbc = wbc,
1159 		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1160 	};
1161 	struct dnode_of_data dn;
1162 	struct node_info ni;
1163 	struct compress_io_ctx *cic;
1164 	pgoff_t start_idx = start_idx_of_cluster(cc);
1165 	unsigned int last_index = cc->cluster_size - 1;
1166 	loff_t psize;
1167 	int i, err;
1168 
1169 	if (IS_NOQUOTA(inode)) {
1170 		/*
1171 		 * We need to hold node_write to prevent block allocation during
1172 		 * checkpoint. This can only happen for quota writes, which can
1173 		 * cause the discard race described below.
1174 		 */
1175 		down_read(&sbi->node_write);
1176 	} else if (!f2fs_trylock_op(sbi)) {
1177 		goto out_free;
1178 	}
1179 
1180 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1181 
1182 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1183 	if (err)
1184 		goto out_unlock_op;
1185 
1186 	for (i = 0; i < cc->cluster_size; i++) {
1187 		if (data_blkaddr(dn.inode, dn.node_page,
1188 					dn.ofs_in_node + i) == NULL_ADDR)
1189 			goto out_put_dnode;
1190 	}
1191 
1192 	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1193 
1194 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1195 	if (err)
1196 		goto out_put_dnode;
1197 
1198 	fio.version = ni.version;
1199 
1200 	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
1201 	if (!cic)
1202 		goto out_put_dnode;
1203 
1204 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1205 	cic->inode = inode;
1206 	atomic_set(&cic->pending_pages, cc->nr_cpages);
1207 	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1208 	if (!cic->rpages)
1209 		goto out_put_cic;
1210 
1211 	cic->nr_rpages = cc->cluster_size;
1212 
1213 	for (i = 0; i < cc->nr_cpages; i++) {
1214 		f2fs_set_compressed_page(cc->cpages[i], inode,
1215 					cc->rpages[i + 1]->index, cic);
1216 		fio.compressed_page = cc->cpages[i];
1217 
1218 		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1219 						dn.ofs_in_node + i + 1);
1220 
1221 		/* wait for GCed page writeback via META_MAPPING */
1222 		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1223 
1224 		if (fio.encrypted) {
1225 			fio.page = cc->rpages[i + 1];
1226 			err = f2fs_encrypt_one_page(&fio);
1227 			if (err)
1228 				goto out_destroy_crypt;
1229 			cc->cpages[i] = fio.encrypted_page;
1230 		}
1231 	}
1232 
1233 	set_cluster_writeback(cc);
1234 
1235 	for (i = 0; i < cc->cluster_size; i++)
1236 		cic->rpages[i] = cc->rpages[i];
1237 
1238 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1239 		block_t blkaddr;
1240 
1241 		blkaddr = f2fs_data_blkaddr(&dn);
1242 		fio.page = cc->rpages[i];
1243 		fio.old_blkaddr = blkaddr;
1244 
1245 		/* cluster header */
1246 		if (i == 0) {
1247 			if (blkaddr == COMPRESS_ADDR)
1248 				fio.compr_blocks++;
1249 			if (__is_valid_data_blkaddr(blkaddr))
1250 				f2fs_invalidate_blocks(sbi, blkaddr);
1251 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1252 			goto unlock_continue;
1253 		}
1254 
1255 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1256 			fio.compr_blocks++;
1257 
1258 		if (i > cc->nr_cpages) {
1259 			if (__is_valid_data_blkaddr(blkaddr)) {
1260 				f2fs_invalidate_blocks(sbi, blkaddr);
1261 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1262 			}
1263 			goto unlock_continue;
1264 		}
1265 
1266 		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1267 
1268 		if (fio.encrypted)
1269 			fio.encrypted_page = cc->cpages[i - 1];
1270 		else
1271 			fio.compressed_page = cc->cpages[i - 1];
1272 
1273 		cc->cpages[i - 1] = NULL;
1274 		f2fs_outplace_write_data(&dn, &fio);
1275 		(*submitted)++;
1276 unlock_continue:
1277 		inode_dec_dirty_pages(cc->inode);
1278 		unlock_page(fio.page);
1279 	}
1280 
1281 	if (fio.compr_blocks)
1282 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1283 	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
1284 
1285 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1286 	if (cc->cluster_idx == 0)
1287 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1288 
1289 	f2fs_put_dnode(&dn);
1290 	if (IS_NOQUOTA(inode))
1291 		up_read(&sbi->node_write);
1292 	else
1293 		f2fs_unlock_op(sbi);
1294 
1295 	spin_lock(&fi->i_size_lock);
1296 	if (fi->last_disk_size < psize)
1297 		fi->last_disk_size = psize;
1298 	spin_unlock(&fi->i_size_lock);
1299 
1300 	f2fs_put_rpages(cc);
1301 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1302 	cc->cpages = NULL;
1303 	f2fs_destroy_compress_ctx(cc, false);
1304 	return 0;
1305 
1306 out_destroy_crypt:
1307 	page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1308 
1309 	for (--i; i >= 0; i--)
1310 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1311 out_put_cic:
1312 	kmem_cache_free(cic_entry_slab, cic);
1313 out_put_dnode:
1314 	f2fs_put_dnode(&dn);
1315 out_unlock_op:
1316 	if (IS_NOQUOTA(inode))
1317 		up_read(&sbi->node_write);
1318 	else
1319 		f2fs_unlock_op(sbi);
1320 out_free:
1321 	for (i = 0; i < cc->nr_cpages; i++) {
1322 		if (!cc->cpages[i])
1323 			continue;
1324 		f2fs_compress_free_page(cc->cpages[i]);
1325 		cc->cpages[i] = NULL;
1326 	}
1327 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1328 	cc->cpages = NULL;
1329 	return -EAGAIN;
1330 }
1331 
1332 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1333 {
1334 	struct f2fs_sb_info *sbi = bio->bi_private;
1335 	struct compress_io_ctx *cic =
1336 			(struct compress_io_ctx *)page_private(page);
1337 	int i;
1338 
1339 	if (unlikely(bio->bi_status))
1340 		mapping_set_error(cic->inode->i_mapping, -EIO);
1341 
1342 	f2fs_compress_free_page(page);
1343 
1344 	dec_page_count(sbi, F2FS_WB_DATA);
1345 
1346 	if (atomic_dec_return(&cic->pending_pages))
1347 		return;
1348 
1349 	for (i = 0; i < cic->nr_rpages; i++) {
1350 		WARN_ON(!cic->rpages[i]);
1351 		clear_cold_data(cic->rpages[i]);
1352 		end_page_writeback(cic->rpages[i]);
1353 	}
1354 
1355 	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1356 	kmem_cache_free(cic_entry_slab, cic);
1357 }
1358 
1359 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1360 					int *submitted,
1361 					struct writeback_control *wbc,
1362 					enum iostat_type io_type)
1363 {
1364 	struct address_space *mapping = cc->inode->i_mapping;
1365 	int _submitted, compr_blocks, ret, i;
1366 
1367 	compr_blocks = f2fs_compressed_blocks(cc);
1368 
1369 	for (i = 0; i < cc->cluster_size; i++) {
1370 		if (!cc->rpages[i])
1371 			continue;
1372 
1373 		redirty_page_for_writepage(wbc, cc->rpages[i]);
1374 		unlock_page(cc->rpages[i]);
1375 	}
1376 
1377 	if (compr_blocks < 0)
1378 		return compr_blocks;
1379 
1380 	for (i = 0; i < cc->cluster_size; i++) {
1381 		if (!cc->rpages[i])
1382 			continue;
1383 retry_write:
1384 		lock_page(cc->rpages[i]);
1385 
1386 		if (cc->rpages[i]->mapping != mapping) {
1387 continue_unlock:
1388 			unlock_page(cc->rpages[i]);
1389 			continue;
1390 		}
1391 
1392 		if (!PageDirty(cc->rpages[i]))
1393 			goto continue_unlock;
1394 
1395 		if (!clear_page_dirty_for_io(cc->rpages[i]))
1396 			goto continue_unlock;
1397 
1398 		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1399 						NULL, NULL, wbc, io_type,
1400 						compr_blocks, false);
1401 		if (ret) {
1402 			if (ret == AOP_WRITEPAGE_ACTIVATE) {
1403 				unlock_page(cc->rpages[i]);
1404 				ret = 0;
1405 			} else if (ret == -EAGAIN) {
1406 				/*
1407 				 * for quota file, just redirty left pages to
1408 				 * avoid deadlock caused by cluster update race
1409 				 * from foreground operation.
1410 				 */
1411 				if (IS_NOQUOTA(cc->inode))
1412 					return 0;
1413 				ret = 0;
1414 				cond_resched();
1415 				congestion_wait(BLK_RW_ASYNC,
1416 						DEFAULT_IO_TIMEOUT);
1417 				goto retry_write;
1418 			}
1419 			return ret;
1420 		}
1421 
1422 		*submitted += _submitted;
1423 	}
1424 
1425 	f2fs_balance_fs(F2FS_M_SB(mapping), true);
1426 
1427 	return 0;
1428 }
1429 
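/*
 * Writeback entry point for a cluster: try the compressed path first;
 * if compression is not possible or does not pay off (-EAGAIN), write
 * the raw pages one by one.
 */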
1430 int f2fs_write_multi_pages(struct compress_ctx *cc,
1431 					int *submitted,
1432 					struct writeback_control *wbc,
1433 					enum iostat_type io_type)
1434 {
1435 	int err;
1436 
1437 	*submitted = 0;
1438 	if (cluster_may_compress(cc)) {
1439 		err = f2fs_compress_pages(cc);
1440 		if (err == -EAGAIN) {
1441 			goto write;
1442 		} else if (err) {
1443 			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1444 			goto destroy_out;
1445 		}
1446 
1447 		err = f2fs_write_compressed_pages(cc, submitted,
1448 							wbc, io_type);
1449 		if (!err)
1450 			return 0;
1451 		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1452 	}
1453 write:
1454 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1455 
1456 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1457 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1458 destroy_out:
1459 	f2fs_destroy_compress_ctx(cc, false);
1460 	return err;
1461 }
1462 
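/*
 * Build the decompress_io_ctx for a cluster read: mirror the caller's
 * rpages and allocate one page per compressed block, tagging each with
 * the dic so the read-completion path can find its context.
 */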
1463 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1464 {
1465 	struct decompress_io_ctx *dic;
1466 	pgoff_t start_idx = start_idx_of_cluster(cc);
1467 	int i;
1468 
1469 	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
1470 	if (!dic)
1471 		return ERR_PTR(-ENOMEM);
1472 
1473 	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1474 	if (!dic->rpages) {
1475 		kmem_cache_free(dic_entry_slab, dic);
1476 		return ERR_PTR(-ENOMEM);
1477 	}
1478 
1479 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1480 	dic->inode = cc->inode;
1481 	atomic_set(&dic->pending_pages, cc->nr_cpages);
1482 	dic->cluster_idx = cc->cluster_idx;
1483 	dic->cluster_size = cc->cluster_size;
1484 	dic->log_cluster_size = cc->log_cluster_size;
1485 	dic->nr_cpages = cc->nr_cpages;
1486 	dic->failed = false;
1487 
1488 	for (i = 0; i < dic->cluster_size; i++)
1489 		dic->rpages[i] = cc->rpages[i];
1490 	dic->nr_rpages = cc->cluster_size;
1491 
1492 	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1493 	if (!dic->cpages)
1494 		goto out_free;
1495 
1496 	for (i = 0; i < dic->nr_cpages; i++) {
1497 		struct page *page;
1498 
1499 		page = f2fs_compress_alloc_page();
1500 		if (!page)
1501 			goto out_free;
1502 
1503 		f2fs_set_compressed_page(page, cc->inode,
1504 					start_idx + i + 1, dic);
1505 		dic->cpages[i] = page;
1506 	}
1507 
1508 	return dic;
1509 
1510 out_free:
1511 	f2fs_free_dic(dic);
1512 	return ERR_PTR(-ENOMEM);
1513 }
1514 
1515 void f2fs_free_dic(struct decompress_io_ctx *dic)
1516 {
1517 	int i;
1518 
1519 	if (dic->tpages) {
1520 		for (i = 0; i < dic->cluster_size; i++) {
1521 			if (dic->rpages[i])
1522 				continue;
1523 			if (!dic->tpages[i])
1524 				continue;
1525 			f2fs_compress_free_page(dic->tpages[i]);
1526 		}
1527 		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1528 	}
1529 
1530 	if (dic->cpages) {
1531 		for (i = 0; i < dic->nr_cpages; i++) {
1532 			if (!dic->cpages[i])
1533 				continue;
1534 			f2fs_compress_free_page(dic->cpages[i]);
1535 		}
1536 		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1537 	}
1538 
1539 	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1540 	kmem_cache_free(dic_entry_slab, dic);
1541 }
1542 
1543 void f2fs_decompress_end_io(struct page **rpages,
1544 			unsigned int cluster_size, bool err, bool verity)
1545 {
1546 	int i;
1547 
1548 	for (i = 0; i < cluster_size; i++) {
1549 		struct page *rpage = rpages[i];
1550 
1551 		if (!rpage)
1552 			continue;
1553 
1554 		if (err || PageError(rpage))
1555 			goto clear_uptodate;
1556 
1557 		if (!verity || fsverity_verify_page(rpage)) {
1558 			SetPageUptodate(rpage);
1559 			goto unlock;
1560 		}
1561 clear_uptodate:
1562 		ClearPageUptodate(rpage);
1563 		ClearPageError(rpage);
1564 unlock:
1565 		unlock_page(rpage);
1566 	}
1567 }
1568 
1569 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1570 {
1571 	dev_t dev = sbi->sb->s_bdev->bd_dev;
1572 	char slab_name[32];
1573 
1574 	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1575 
1576 	sbi->page_array_slab_size = sizeof(struct page *) <<
1577 					F2FS_OPTION(sbi).compress_log_size;
1578 
1579 	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1580 					sbi->page_array_slab_size);
1581 	if (!sbi->page_array_slab)
1582 		return -ENOMEM;
1583 	return 0;
1584 }
1585 
1586 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1587 {
1588 	kmem_cache_destroy(sbi->page_array_slab);
1589 }
1590 
1591 static int __init f2fs_init_cic_cache(void)
1592 {
1593 	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1594 					sizeof(struct compress_io_ctx));
1595 	if (!cic_entry_slab)
1596 		return -ENOMEM;
1597 	return 0;
1598 }
1599 
1600 static void f2fs_destroy_cic_cache(void)
1601 {
1602 	kmem_cache_destroy(cic_entry_slab);
1603 }
1604 
1605 static int __init f2fs_init_dic_cache(void)
1606 {
1607 	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1608 					sizeof(struct decompress_io_ctx));
1609 	if (!dic_entry_slab)
1610 		return -ENOMEM;
1611 	return 0;
1612 }
1613 
1614 static void f2fs_destroy_dic_cache(void)
1615 {
1616 	kmem_cache_destroy(dic_entry_slab);
1617 }
1618 
1619 int __init f2fs_init_compress_cache(void)
1620 {
1621 	int err;
1622 
1623 	err = f2fs_init_cic_cache();
1624 	if (err)
1625 		goto out;
1626 	err = f2fs_init_dic_cache();
1627 	if (err)
1628 		goto free_cic;
1629 	return 0;
1630 free_cic:
1631 	f2fs_destroy_cic_cache();
1632 out:
1633 	return -ENOMEM;
1634 }
1635 
1636 void f2fs_destroy_compress_cache(void)
1637 {
1638 	f2fs_destroy_dic_cache();
1639 	f2fs_destroy_cic_cache();
1640 }
1641