// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/moduleparam.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

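/*
 * Cluster geometry helpers: a cluster covers 1 << log_cluster_size pages,
 * so the low bits of a page index locate the page inside its cluster and
 * the high bits name the cluster itself.
 */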
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

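/*
 * Compressed pages carry a pointer to their owning compress_io_ctx or
 * decompress_io_ctx in page_private(); both structures begin with
 * F2FS_COMPRESSED_PAGE_MAGIC, which lets the tag be sanity-checked here.
 */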
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->nr_rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst case, because the lz4 compressor handles the output
	 * budget itself: it fails cleanly when the result would not save
	 * at least one page.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

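/*
 * Note that the ZSTD_CStream object is carved out of the kvmalloc'ed
 * workspace, so teardown only needs to free cc->private; cc->private2
 * merely points into it.
 */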
static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * A nonzero return from ZSTD_endStream() means compressed data
	 * remains in the internal buffer because cbuf.cdata ran out of
	 * space; fall back to writing the cluster uncompressed.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;

	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
					workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

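/*
 * Backends indexed by the per-inode compression algorithm id; a NULL slot
 * means the algorithm was configured out of this kernel build.
 */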
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

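/*
 * Intermediate compress/decompress pages come from a preallocated mempool,
 * so allocation under GFP_NOFS waits instead of failing. Each page is
 * returned locked so it can be treated like a normal pagecache page.
 */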
static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

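/*
 * Compress one cluster: vmap the raw pages (rpages) and destination pages
 * (cpages) into two contiguous buffers, run the backend, verify that the
 * result saves at least one block, fill in the cluster header (clen plus
 * reserved words), zero the tail of the last compressed page, and release
 * the cpages a smaller result no longer needs.
 */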
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

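/*
 * Called from the read end_io path for every compressed page of a cluster;
 * only the caller that drops dic->ref to one actually decompresses. For
 * verity files the raw pages are left for the verity path to verify and
 * unlock instead of being completed here.
 */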
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* bail out if any compressed page of the cluster failed to read */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (verity)
		refcount_set(&dic->ref, dic->nr_cpages);
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

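/*
 * A compressed cluster stores COMPRESS_ADDR in its first block slot as a
 * header, so counting starts at 1. @compr selects whether to count only
 * written compressed blocks or every non-NULL block of the cluster.
 */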
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(cc->inode),
			!compr && ret != cc->cluster_size &&
			!is_inode_flag_set(cc->inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc, false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

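/*
 * Read-modify-write preparation for overwriting part of a compressed
 * cluster: pin and lock every page of the cluster, read in any that are
 * not uptodate yet, and hand the locked pages back through *pagep/*fsdata.
 * Returns cluster_size on success, 0 if the cluster is not on disk, or a
 * negative errno.
 */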
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_cluster_blocks(cc, false);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

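/*
 * Truncation inside a compressed cluster cannot just drop blocks, because
 * the cluster is stored as one unit: load the whole cluster, zero all data
 * beyond @from page by page, and write it back through the compress path.
 */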
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

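/*
 * Write back one compressed cluster: the first block address becomes
 * COMPRESS_ADDR as the cluster header, the next nr_cpages slots get the
 * compressed pages, and leftover slots are invalidated and remapped to
 * NEW_ADDR. A compress_io_ctx refcounted by nr_cpages ties the in-flight
 * bios to the raw pages so writeback ends only after all of them complete.
 */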
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
		return -EAGAIN;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, cc->nr_cpages);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			if (fscrypt_inode_uses_fs_layer_crypto(inode))
				cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted && fscrypt_inode_uses_fs_layer_crypto(inode))
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (!IS_NOQUOTA(inode))
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (!IS_NOQUOTA(inode))
		f2fs_unlock_op(sbi);
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

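/*
 * Fallback used when the cluster is not written compressed: push each
 * dirty page through the normal single-page write path, passing along how
 * many compressed blocks the cluster currently occupies on disk.
 */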
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

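/*
 * Build the decompress_io_ctx for a cluster read: rpages are the caller's
 * pagecache pages, cpages receive the on-disk compressed data, and tpages
 * are the decompression targets: an rpage where one exists, otherwise a
 * temporary mempool page for the slots not covered by the request.
 */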
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i]) {
			dic->tpages[i] = cc->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}