// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
#include "compression.h"

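/*
 * State handed out by the generic btrfs workspace manager: a preallocated
 * z_stream plus its scratch memory, a one-page bounce buffer used during
 * decompression, a list link for the manager, and the compression level.
 */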
struct workspace {
	z_stream strm;
	char *buf;
	struct list_head list;
	int level;
};

static struct workspace_manager wsm;

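/*
 * Thin wrappers around the generic workspace manager in compression.c;
 * the manager hands out preallocated workspaces so the compression and
 * decompression paths do not allocate zlib scratch memory on every call.
 */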
static void zlib_init_workspace_manager(void)
{
	btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
}

static void zlib_cleanup_workspace_manager(void)
{
	btrfs_cleanup_workspace_manager(&wsm);
}

static struct list_head *zlib_get_workspace(unsigned int level)
{
	struct list_head *ws = btrfs_get_workspace(&wsm, level);
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	workspace->level = level;

	return ws;
}

static void zlib_put_workspace(struct list_head *ws)
{
	btrfs_put_workspace(&wsm, ws);
}

static void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}

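/*
 * The zlib scratch area is sized for the larger of the deflate and
 * inflate requirements, so a single workspace can serve both directions.
 */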
static struct list_head *zlib_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->level = level;
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

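/*
 * Compress a range of @mapping starting at @start into the @pages array.
 * On entry *total_out holds the number of input bytes to compress and
 * *out_pages the capacity of @pages; on return they hold the compressed
 * length and the number of pages actually used.  -E2BIG means the data
 * did not shrink enough to be worth storing compressed.
 */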
static int zlib_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start,
			       struct page **pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	data_in = kmap(in_page);

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	workspace->strm.next_in = data_in;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.avail_in = min(len, PAGE_SIZE);

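	/*
	 * Walk the input one page at a time, refilling avail_in and
	 * swapping in a fresh destination page whenever avail_out runs dry.
	 */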
	while (workspace->strm.total_in < len) {
		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n",
			       ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out.  Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;

		/* we've read in a full page, get a new one */
		if (workspace->strm.avail_in == 0) {
			if (workspace->strm.total_out > max_out)
				break;

			bytes_left = len - workspace->strm.total_in;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
			data_in = kmap(in_page);
			workspace->strm.avail_in = min(bytes_left,
							   PAGE_SIZE);
			workspace->strm.next_in = data_in;
		}
	}
	workspace->strm.avail_in = 0;
	ret = zlib_deflate(&workspace->strm, Z_FINISH);
	zlib_deflateEnd(&workspace->strm);

	if (ret != Z_STREAM_END) {
		ret = -EIO;
		goto out;
	}

	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}

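/*
 * Inflate the compressed pages attached to @cb and copy the result into
 * the pages of the original bio via btrfs_decompress_buf2page(), using
 * workspace->buf as a one-page staging buffer.
 */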
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
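	/* The two header bytes are CMF and FLG: the low nibble of CMF is
	   the method (8 == deflate), the high nibble encodes the window
	   size, and a negative windowBits below makes zlib_inflate()
	   consume the raw deflate stream without header or checksum. */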
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;
			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
							   PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		zero_fill_bio(orig_bio);
	return ret;
}

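/*
 * Decompress @srclen bytes of @data_in into @dest_page, skipping the
 * first @start_byte bytes of decompressed output; at most one page
 * (capped by @destlen) is copied out.
 */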
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
			   struct page *dest_page,
			   unsigned long start_byte,
			   size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

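	/*
	 * Inflate into the one-page bounce buffer and copy out only the
	 * bytes that fall at or after @start_byte, until the destination
	 * page is full or the stream ends.
	 */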
	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected.  btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}

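/*
 * Callbacks that plug zlib into the generic btrfs compression layer.
 * Levels 1-9 map directly to zlib compression levels.
 */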
const struct btrfs_compress_op btrfs_zlib_compress = {
	.init_workspace_manager	= zlib_init_workspace_manager,
	.cleanup_workspace_manager = zlib_cleanup_workspace_manager,
	.get_workspace		= zlib_get_workspace,
	.put_workspace		= zlib_put_workspace,
	.alloc_workspace	= zlib_alloc_workspace,
	.free_workspace		= zlib_free_workspace,
	.compress_pages		= zlib_compress_pages,
	.decompress_bio		= zlib_decompress_bio,
	.decompress		= zlib_decompress,
	.max_level		= 9,
	.default_level		= BTRFS_ZLIB_DEFAULT_LEVEL,
};
434