/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * block.c
 */

/*
 * This file implements the low-level routines to read and decompress
 * datablocks and metadata blocks.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

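/*
 * Workqueue for deferred processing of asynchronous reads; created by
 * squashfs_init_read_wq() and drained by squashfs_destroy_read_wq().
 */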
static struct workqueue_struct *squashfs_read_wq;

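/*
 * Describes a single read of a data or metadata block from the underlying
 * block device, together with how the buffers should be post-processed
 * (copied, decompressed, or parsed as a metadata block) and where the
 * result goes.
 */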
struct squashfs_read_request {
	struct super_block *sb;
	u64 index;
	int length;
	int compressed;
	int offset;
	u64 read_end;
	struct squashfs_page_actor *output;
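	/*
	 * SQUASHFS_COPY: block is stored uncompressed, copy it to the actor.
	 * SQUASHFS_DECOMPRESS: block is compressed, run the decompressor.
	 * SQUASHFS_METADATA: length/compression are unknown until the two
	 * byte header at the start of the block has been read.
	 */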
	enum {
		SQUASHFS_COPY,
		SQUASHFS_DECOMPRESS,
		SQUASHFS_METADATA,
	} data_processing;
	bool synchronous;

	/*
	 * If the read is synchronous, it is possible to retrieve information
	 * about the request by setting these pointers.
	 */
	int *res;
	int *bytes_read;
	int *bytes_uncompressed;

	int nr_buffers;
	struct buffer_head **bh;
	struct work_struct offload;
};

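/*
 * Private data attached to each submitted BIO: the buffer_heads it covers,
 * so that the completion handler can mark them up to date and unlock them.
 */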
struct squashfs_bio_request {
	struct buffer_head **bh;
	int nr_buffers;
};

static int squashfs_bio_submit(struct squashfs_read_request *req);

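/* Returns non-zero on success, zero if the workqueue could not be created. */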
int squashfs_init_read_wq(void)
{
	squashfs_read_wq = create_workqueue("SquashFS read wq");
	return !!squashfs_read_wq;
}

void squashfs_destroy_read_wq(void)
{
	flush_workqueue(squashfs_read_wq);
	destroy_workqueue(squashfs_read_wq);
}

static void free_read_request(struct squashfs_read_request *req, int error)
{
	if (!req->synchronous)
		squashfs_page_actor_free(req->output, error);
	if (req->res)
		*(req->res) = error;
	kfree(req->bh);
	kfree(req);
}

static void squashfs_process_blocks(struct squashfs_read_request *req)
{
	int error = 0;
	int bytes, i, length;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
	struct squashfs_page_actor *actor = req->output;
	struct buffer_head **bh = req->bh;
	int nr_buffers = req->nr_buffers;

	for (i = 0; i < nr_buffers; ++i) {
		if (!bh[i])
			continue;
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			error = -EIO;
	}
	if (error)
		goto cleanup;

	if (req->data_processing == SQUASHFS_METADATA) {
		/* Extract the length of the metadata block */
		if (req->offset != msblk->devblksize - 1) {
			length = le16_to_cpup((__le16 *)
					(bh[0]->b_data + req->offset));
		} else {
			length = (unsigned char)bh[0]->b_data[req->offset];
			length |= (unsigned char)bh[1]->b_data[0] << 8;
		}
		req->compressed = SQUASHFS_COMPRESSED(length);
		req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
						       : SQUASHFS_COPY;
		length = SQUASHFS_COMPRESSED_SIZE(length);
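		/*
		 * If the block extends beyond the range already read (only
		 * the two length bytes were requested initially), drop the
		 * current buffers and resubmit the request for the full
		 * block, now that its length is known.
		 */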
		if (req->index + length + 2 > req->read_end) {
			for (i = 0; i < nr_buffers; ++i)
				put_bh(bh[i]);
			kfree(bh);
			req->length = length;
			req->index += 2;
			squashfs_bio_submit(req);
			return;
		}
		req->length = length;
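		/*
		 * Skip the two length bytes; if the header ended in the
		 * next buffer, the first buffer is no longer needed.
		 */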
		req->offset = (req->offset + 2) % PAGE_SIZE;
		if (req->offset < 2) {
			put_bh(bh[0]);
			++bh;
			--nr_buffers;
		}
	}
	if (req->bytes_read)
		*(req->bytes_read) = req->length;

	if (req->data_processing == SQUASHFS_COPY) {
		squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
			req->length, msblk->devblksize);
	} else if (req->data_processing == SQUASHFS_DECOMPRESS) {
		req->length = squashfs_decompress(msblk, bh, nr_buffers,
			req->offset, req->length, actor);
		if (req->length < 0) {
			error = -EIO;
			goto cleanup;
		}
	}

	/* Last page may have trailing bytes not filled */
	bytes = req->length % PAGE_SIZE;
	if (bytes && actor->page[actor->pages - 1])
		zero_user_segment(actor->page[actor->pages - 1], bytes,
				  PAGE_SIZE);

cleanup:
	if (req->bytes_uncompressed)
		*(req->bytes_uncompressed) = req->length;
	if (error) {
		for (i = 0; i < nr_buffers; ++i)
			if (bh[i])
				put_bh(bh[i]);
	}
	free_read_request(req, error);
}

static void read_wq_handler(struct work_struct *work)
{
	squashfs_process_blocks(container_of(work,
		    struct squashfs_read_request, offload));
}

static void squashfs_bio_end_io(struct bio *bio)
{
	int i;
	int error = bio->bi_error;
	struct squashfs_bio_request *bio_req = bio->bi_private;

	bio_put(bio);

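	/*
	 * Propagate the BIO status to every buffer_head it covered and wake
	 * up any thread waiting on them in squashfs_process_blocks().
	 */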
	for (i = 0; i < bio_req->nr_buffers; ++i) {
		if (!bio_req->bh[i])
			continue;
		if (!error)
			set_buffer_uptodate(bio_req->bh[i]);
		else
			clear_buffer_uptodate(bio_req->bh[i]);
		unlock_buffer(bio_req->bh[i]);
	}
	kfree(bio_req);
}

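/*
 * A buffer is "optional" if every output page it would fill is missing
 * (NULL) from the page actor.  For uncompressed blocks such device blocks
 * carry no data the caller wants, so they do not need to be read at all.
 */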
static int bh_is_optional(struct squashfs_read_request *req, int idx)
{
	int start_idx, end_idx;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
	end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
	if (start_idx >= req->output->pages)
		return 1;
	if (start_idx < 0)
		start_idx = end_idx;
	if (end_idx >= req->output->pages)
		end_idx = start_idx;
	return !req->output->page[start_idx] && !req->output->page[end_idx];
}

static int actor_getblks(struct squashfs_read_request *req, u64 block)
{
	int i;

	req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
	if (!req->bh)
		return -ENOMEM;

	for (i = 0; i < req->nr_buffers; ++i) {
		/*
		 * When dealing with an uncompressed block, the actor may
		 * contain NULL pages. There's no need to read the buffers
		 * associated with these pages.
		 */
		if (!req->compressed && bh_is_optional(req, i)) {
			req->bh[i] = NULL;
			continue;
		}
		req->bh[i] = sb_getblk(req->sb, block + i);
		if (!req->bh[i]) {
			while (i--) {
				if (req->bh[i])
					put_bh(req->bh[i]);
			}
			return -1;
		}
	}
	return 0;
}

static int squashfs_bio_submit(struct squashfs_read_request *req)
{
	struct bio *bio = NULL;
	struct buffer_head *bh;
	struct squashfs_bio_request *bio_req = NULL;
	int b = 0, prev_block = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	u64 read_start = round_down(req->index, msblk->devblksize);
	u64 read_end = round_up(req->index + req->length, msblk->devblksize);
	sector_t block = read_start >> msblk->devblksize_log2;
	sector_t block_end = read_end >> msblk->devblksize_log2;
	int offset = read_start - round_down(req->index, PAGE_SIZE);
	int nr_buffers = block_end - block;
	int blksz = msblk->devblksize;
	int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
						       : nr_buffers;

	/* Setup the request */
	req->read_end = read_end;
	req->offset = req->index - read_start;
	req->nr_buffers = nr_buffers;
	if (actor_getblks(req, block) < 0)
		goto getblk_failed;

	/* Create and submit the BIOs */
	for (b = 0; b < nr_buffers; ++b, offset += blksz) {
		bh = req->bh[b];
		if (!bh || !trylock_buffer(bh))
			continue;
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			continue;
		}
		offset %= PAGE_SIZE;

		/* Append the buffer to the current BIO if it is contiguous */
		if (bio && bio_req && prev_block + 1 == b) {
			if (bio_add_page(bio, bh->b_page, blksz, offset)) {
				bio_req->nr_buffers += 1;
				prev_block = b;
				continue;
			}
		}

		/* Otherwise, submit the current BIO and create a new one */
		if (bio)
			submit_bio(READ, bio);
		bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
				  GFP_NOIO);
		if (!bio_req)
			goto req_alloc_failed;
		bio_req->bh = &req->bh[b];
		bio = bio_alloc(GFP_NOIO, bio_max_pages);
		if (!bio)
			goto bio_alloc_failed;
		bio->bi_bdev = req->sb->s_bdev;
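		/* Device blocks are converted to 512-byte sectors here. */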
		bio->bi_iter.bi_sector = (block + b)
				       << (msblk->devblksize_log2 - 9);
		bio->bi_private = bio_req;
		bio->bi_end_io = squashfs_bio_end_io;

		bio_add_page(bio, bh->b_page, blksz, offset);
		bio_req->nr_buffers += 1;
		prev_block = b;
	}
	if (bio)
		submit_bio(READ, bio);

	if (req->synchronous)
		squashfs_process_blocks(req);
	else {
		INIT_WORK(&req->offload, read_wq_handler);
		queue_work(squashfs_read_wq, &req->offload);
	}
	return 0;

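	/*
	 * Error unwind: release the buffers that were never submitted, then
	 * wait for the ones already in flight before freeing the request.
	 */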
bio_alloc_failed:
	kfree(bio_req);
req_alloc_failed:
	unlock_buffer(bh);
	while (--nr_buffers >= b)
		if (req->bh[nr_buffers])
			put_bh(req->bh[nr_buffers]);
	while (--b >= 0)
		if (req->bh[b])
			wait_on_buffer(req->bh[b]);
getblk_failed:
	free_read_request(req, -ENOMEM);
	return -ENOMEM;
}

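/*
 * Metadata blocks are preceded by a two byte length header, so only those
 * two bytes are requested here; squashfs_process_blocks() parses the header
 * and resubmits the request for the rest of the block.
 */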
static int read_metadata_block(struct squashfs_read_request *req,
			       u64 *next_index)
{
	int ret, error, bytes_read = 0, bytes_uncompressed = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	if (req->index + 2 > msblk->bytes_used) {
		free_read_request(req, -EINVAL);
		return -EINVAL;
	}
	req->length = 2;

	/* Do not read beyond the end of the device */
	if (req->index + req->length > msblk->bytes_used)
		req->length = msblk->bytes_used - req->index;
	req->data_processing = SQUASHFS_METADATA;

	/*
	 * Reading metadata is always synchronous because we don't know the
	 * length in advance and the function is expected to update
	 * 'next_index' and return the length.
	 */
	req->synchronous = true;
	req->res = &error;
	req->bytes_read = &bytes_read;
	req->bytes_uncompressed = &bytes_uncompressed;

	TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", bytes_read,
	      req->output->length);

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (error)
		return error;
	if (next_index)
		*next_index += 2 + bytes_read;
	return bytes_uncompressed;
}

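/*
 * Read a datablock whose (possibly compressed) length is already known:
 * the SQUASHFS_COMPRESSED_BLOCK bit of 'length' says whether the block
 * needs decompressing, the remaining bits give its size on disk.
 */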
static int read_data_block(struct squashfs_read_request *req, int length,
			   u64 *next_index, bool synchronous)
{
	int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;

	req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
	req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
	req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
					       : SQUASHFS_COPY;

	req->synchronous = synchronous;
	if (synchronous) {
		req->res = &error;
		req->bytes_read = &bytes_read;
		req->bytes_uncompressed = &bytes_uncompressed;
	}

	TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", req->length,
	      req->output->length);

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (synchronous)
		ret = error ? error : bytes_uncompressed;
	if (next_index)
		*next_index += length;
	return ret;
}

/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output, bool sync)
{
	struct squashfs_read_request *req;

	req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
	if (!req) {
		if (!sync)
			squashfs_page_actor_free(output, -ENOMEM);
		return -ENOMEM;
	}

	req->sb = sb;
	req->index = index;
	req->output = output;

	if (next_index)
		*next_index = index;

	if (length)
		length = read_data_block(req, length, next_index, sync);
	else
		length = read_metadata_block(req, next_index);

	if (length < 0) {
		ERROR("squashfs_read_data failed to read block 0x%llx\n",
		      (unsigned long long)index);
		return -EIO;
	}

	return length;
}

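/*
 * Synchronous read: returns the number of bytes produced (after any
 * decompression) or a negative error code.
 */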
int squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{
	return __squashfs_read_data(sb, index, length, next_index, output,
				    true);
}

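/*
 * Asynchronous read: the buffers are processed from workqueue context and
 * the page actor is released by the request itself once it completes.
 */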
int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{
	return __squashfs_read_data(sb, index, length, next_index, output,
				    false);
}
476