// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

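/*
 * rpages/cpages arrays are usually cluster-sized, so they are served from a
 * per-sb slab (page_array_slab); larger requests fall back to kzalloc.
 */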
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

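/*
 * A compressed page carries its owning context in page->private; the first
 * field of both compress_io_ctx and decompress_io_ctx is the magic checked
 * below, which distinguishes it from other users of page->private.
 */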
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	/*
	 * page->private may be set with pid.
	 * pid_max is enough to check if it is traced.
	 */
	if (IS_IO_TRACED_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst case, because the lz4 compressor handles the output
	 * budget properly on its own.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * Compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf.cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

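/*
 * vm_map_ram() may fail transiently when vmap space is fragmented, so retry
 * a few times, purging lazily-freed vmap aliases between attempts.
 */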
static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

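/*
 * Compress one cluster: map rpages/cpages contiguously, run the
 * per-algorithm backend, and bail out with -EAGAIN (so the cluster gets
 * written raw instead) unless the result saves at least one full page.
 */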
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

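/*
 * Called from read end-io for each compressed page of a cluster; the last
 * page to complete performs the actual decompression into the target pages.
 */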
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (atomic_dec_return(&dic->pending_pages))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_free_dic;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

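/*
 * Walk the cluster's block addresses in the dnode; a COMPRESS_ADDR in the
 * first slot marks a compressed cluster, and the remaining slots are counted
 * either as compressed blocks (@compr) or as any in-use blocks (!@compr).
 */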
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

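/*
 * Pin and lock every raw page of the cluster for an overwrite, reading the
 * cluster in first if needed; retries from scratch if a page is truncated
 * under us. Returns cluster_size on success, 0 for a hole, or -errno.
 */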
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (prealloc) {
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

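/*
 * Truncation inside a compressed cluster cannot free individual blocks, so
 * re-instantiate the whole cluster's raw pages and zero everything past
 * @from; the dirtied pages are later written back as a whole cluster.
 */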
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

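/*
 * Write one compressed cluster: the first block address becomes
 * COMPRESS_ADDR (the cluster header), the compressed pages are written
 * out-of-place behind it, and any leftover block slots are invalidated.
 * Returns -EAGAIN on failure so the caller falls back to raw writes.
 */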
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen to quota writes
		 * which can cause the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret, i;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * For a quota file, just redirty the remaining
				 * pages to avoid a deadlock caused by a cluster
				 * update race with foreground operations.
				 */
				if (IS_NOQUOTA(cc->inode))
					return 0;
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			return ret;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
}

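/*
 * Writeback entry point for a cluster: try the compressed write path first,
 * and fall back to writing the raw pages whenever compression is not
 * possible or not worthwhile (-EAGAIN).
 */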
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

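/*
 * Build a decompress_io_ctx for a cluster read: mirror the caller's rpages
 * and allocate cpages to receive the on-disk compressed data; each cpage's
 * index (start_idx + i + 1) matches its block slot behind the cluster header.
 */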
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->pending_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

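/*
 * Release a decompress_io_ctx. Only pages the dic itself allocated (tpages
 * that do not alias rpages, and all cpages) go back to the mempool; the
 * caller's rpages are left untouched.
 */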
void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}