/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen);

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}
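
/*
 * For example, with 4K sectors and the default 4-byte crc32c checksums,
 * a 16K compressed extent needs:
 *
 *	sizeof(struct compressed_bio) + (16384 / 4096) * 4
 *
 * i.e. the struct plus 16 bytes of checksums, which overlay memory
 * starting at the trailing sums member.
 */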

static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
}
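
/*
 * first_byte is a byte offset on the device, while bios are addressed
 * in 512-byte sectors, hence the shift by 9 above (byte 8192 becomes
 * sector 16).  Compressed extent starts are expected to be at least
 * sector aligned, so nothing is lost in the shift.
 */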

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %llu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n",
			       (unsigned long long)btrfs_ino(inode),
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}
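
/*
 * Note the loop above verifies one u32 per compressed page, while
 * compressed_bio_size() sized the sums array per sector.  The two only
 * line up on the usual 4K-page, 4K-sector configuration, which is what
 * this code path assumes.
 */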

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
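
/*
 * SetPageChecked() above is what lets btrfs's ordinary read end_io
 * hook skip per-page checksum verification for this data: it was
 * already validated against the on-disk compressed bytes before
 * decompression, so there is nothing left to verify page by page.
 */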

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode, u64 start,
					      unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
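
/*
 * A return of 0 from find_get_pages_contig() does not mean the range
 * is finished; the page at 'index' may simply no longer be present
 * (e.g. truncated away), so the loop above skips a single page and
 * keeps scanning instead of abandoning the rest of the range.
 */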

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret); /* -ENOMEM */

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk(KERN_DEBUG "bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret); /* -ENOMEM */

	bio_put(bio);
	return 0;
}
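
/*
 * A typical caller is the async delalloc path in inode.c, which has
 * already compressed the range and marked the file pages writeback,
 * roughly (names follow submit_compressed_extents() and are
 * illustrative):
 *
 *	btrfs_submit_compressed_write(inode, async_extent->start,
 *				      async_extent->ram_size,
 *				      ins.objectid, ins.offset,
 *				      async_extent->pages,
 *				      async_extent->nr_pages);
 *
 * where ins.objectid/ins.offset are the disk byte and length of the
 * freshly allocated extent.
 */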

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_CACHE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
								~__GFP_FS);
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index,
								GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	return 0;
}
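
/*
 * The readahead above deliberately widens cb->orig_bio: decompressing
 * the extent produces bytes for its whole uncompressed range anyway,
 * so adding the neighbouring file pages to the bio lets a single
 * decompression pass populate them all, instead of re-reading and
 * re-inflating the same extent for each subsequent readpage.
 */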

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
				 PAGE_CACHE_SIZE;
	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(READ, page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			if (ret)
				bio_endio(comp_bio, ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	if (ret)
		bio_endio(comp_bio, ret);

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
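
/*
 * Note how 'sums' advances above: each submitted chunk of the read
 * consumes one u32 of cb->sums per sector it covers, so
 * btrfs_lookup_bio_sums() fills consecutive slices of the array and
 * check_compressed_csum() can later walk it from the start.
 */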

static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];

static struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&comp_idle_workspace[i]);
		spin_lock_init(&comp_workspace_lock[i]);
		atomic_set(&comp_alloc_workspace[i], 0);
		init_waitqueue_head(&comp_workspace_wait[i]);
	}
}

/*
 * this finds an available workspace or allocates a new one
 * ERR_PTR is returned if things go bad.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
	int *num_workspace			= &comp_num_workspace[idx];
again:
	spin_lock(workspace_lock);
	if (!list_empty(idle_workspace)) {
		workspace = idle_workspace->next;
		list_del(workspace);
		(*num_workspace)--;
		spin_unlock(workspace_lock);
		return workspace;
	}
	if (atomic_read(alloc_workspace) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(workspace_lock);
		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
			schedule();
		finish_wait(workspace_wait, &wait);
		goto again;
	}
	atomic_inc(alloc_workspace);
	spin_unlock(workspace_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(alloc_workspace);
		wake_up(workspace_wait);
	}
	return workspace;
}
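
/*
 * The comparison against num_online_cpus() bounds the number of live
 * workspaces per compression type at roughly cpus + 1; once that many
 * are handed out, further callers sleep until free_workspace() below
 * returns one.  On a 4-cpu box, a burst of 32 concurrent compressions
 * would share about 5 zlib workspaces rather than allocating 32.
 */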

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
	int *num_workspace			= &comp_num_workspace[idx];

	spin_lock(workspace_lock);
	if (*num_workspace < num_online_cpus()) {
		list_add_tail(workspace, idle_workspace);
		(*num_workspace)++;
		spin_unlock(workspace_lock);
		goto wake;
	}
	spin_unlock(workspace_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(alloc_workspace);
wake:
	smp_mb();
	if (waitqueue_active(workspace_wait))
		wake_up(workspace_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&comp_idle_workspace[i])) {
			workspace = comp_idle_workspace[i].next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&comp_alloc_workspace[i]);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -1;

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, len, pages,
						      nr_dest_pages, out_pages,
						      total_in, total_out,
						      max_out);
	free_workspace(type, workspace);
	return ret;
}
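
/*
 * An illustrative call (names are hypothetical, the shape follows the
 * delalloc compression path):
 *
 *	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping,
 *				   start, len, pages, nr_pages,
 *				   &nr_pages_out, &total_in,
 *				   &total_compressed, max_compressed);
 *
 * On success, total_compressed holds the compressed size; if that is
 * not usefully smaller than total_in, callers typically give up and
 * write the range uncompressed.
 */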

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							 disk_start,
							 bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the uncompressed data we're
 * interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset, within the uncompressed data, of the
 * start of our working buffer.
 *
 * total_out is the uncompressed offset of the end of the working
 * buffer, so total_out - buf_start is how many bytes the buffer holds.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
			    PAGE_CACHE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_CACHE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
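
/*
 * A worked example of the offset math above, assuming 4K pages: if the
 * working buffer begins at uncompressed offset buf_start = 4096 with
 * total_out = 12288, and the current output page maps bytes
 * 8192..12287 of the extent (start_byte = 8192), then the copy starts
 * at buf_offset = 8192 - 4096 = 4096 within the buffer and moves
 * working_bytes = 12288 - 8192 = 4096 bytes into that page.
 */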
1053