/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/vmalloc.h>
#include <linux/zlib.h>

#include "isofs.h"
#include "zisofs.h"

/* This should probably be global. */
static char zisofs_sink_page[PAGE_CACHE_SIZE];

/*
 * This contains the zlib memory allocation and the mutex for the
 * allocation; this avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static DEFINE_MUTEX(zisofs_zlib_lock);

/*
 * When decompressing, we typically obtain more than one page
 * per reference.  We inject the additional pages into the page
 * cache as a form of readahead.
 */
static int zisofs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned int maxpage, xpage, fpage, blockindex;
	unsigned long offset;
	unsigned long blockptr, blockendptr, cstart, cend, csize;
	struct buffer_head *bh, *ptrbh[2];
	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
	unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
	unsigned long bufmask  = bufsize - 1;
	int err = -EIO;
	int i;
	unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
	unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
	/* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
	unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
	unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
	unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
	struct page *pages[zisofs_block_pages];
	unsigned long index = page->index;
	int indexblocks;

	/* We have already been given one page, this is the one
	   we must do. */
	xpage = index & zisofs_block_page_mask;
	pages[xpage] = page;

	/* The remaining pages need to be allocated and inserted */
	offset = index & ~zisofs_block_page_mask;
	blockindex = offset >> zisofs_block_page_shift;
	maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	/*
	 * If this page is wholly outside i_size we just return zero;
	 * do_generic_file_read() will handle this for us
	 */
	if (page->index >= maxpage) {
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

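	/* Number of pages in this zisofs block that actually lie within i_size */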
	maxpage = min(zisofs_block_pages, maxpage-offset);

	for ( i = 0 ; i < maxpage ; i++, offset++ ) {
		if ( i != xpage ) {
			pages[i] = grab_cache_page_nowait(mapping, offset);
		}
		page = pages[i];
		if ( page ) {
			ClearPageError(page);
			kmap(page);
		}
	}

	/* This is the last page filled, plus one; used in case of abort. */
	fpage = 0;

	/* Find the pointer to this specific chunk */
	/* Note: we're not using isonum_731() here because the data is known aligned */
	/* Note: header_size is in 32-bit words (4 bytes) */
	blockptr = (header_size + blockindex) << 2;
	blockendptr = blockptr + 4;

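	/* If the two table entries straddle a device block boundary, two
	   index blocks have to be read instead of one. */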
	indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
	ptrbh[0] = ptrbh[1] = NULL;

	if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
		if ( ptrbh[0] ) brelse(ptrbh[0]);
		printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
		       inode->i_ino, blockptr >> bufshift);
		goto eio;
	}
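	/* Start read I/O on the index buffer(s); completion is awaited below. */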
	ll_rw_block(READ, indexblocks, ptrbh);

	bh = ptrbh[0];
	if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
		printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
		       inode->i_ino, blockptr >> bufshift);
		if ( ptrbh[1] )
			brelse(ptrbh[1]);
		goto eio;
	}
	cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));

	if ( indexblocks == 2 ) {
		/* We just crossed a block boundary.  Switch to the next block */
		brelse(bh);
		bh = ptrbh[1];
		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
			printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
			       inode->i_ino, blockendptr >> bufshift);
			goto eio;
		}
	}
	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
	brelse(bh);

	if (cstart > cend)
		goto eio;

	csize = cend-cstart;

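	/* A compressed block can never exceed zlib's worst-case bound for one
	   uncompressed block; anything larger indicates a corrupt block table. */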
	if (csize > deflateBound(1UL << zisofs_block_shift))
		goto eio;

	/* Now page[] contains an array of pages, any of which can be NULL,
	   and the locks on which we hold.  We should now read the data and
	   release the pages.  If the pages are NULL the decompressed data
	   for that particular page should be discarded. */

	if ( csize == 0 ) {
		/* This data block is empty. */

		for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
			if ( (page = pages[fpage]) != NULL ) {
				memset(page_address(page), 0, PAGE_CACHE_SIZE);

				flush_dcache_page(page);
				SetPageUptodate(page);
				kunmap(page);
				unlock_page(page);
				if ( fpage == xpage )
					err = 0; /* The critical page */
				else
					page_cache_release(page);
			}
		}
	} else {
		/* This data block is compressed. */
		z_stream stream;
		int bail = 0, left_out = -1;
		int zerr;
		int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
		int haveblocks;
		struct buffer_head *bhs[needblocks+1];
		struct buffer_head **bhptr;

		/* Because zlib is not thread-safe, do all the I/O at the top. */

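		/* Map the device blocks holding the compressed data and start reading them. */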
		blockptr = cstart >> bufshift;
		memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
		haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
		ll_rw_block(READ, haveblocks, bhs);

		bhptr = &bhs[0];
		bh = *bhptr++;

		/* First block is special since it may be fractional.
		   We also wait for it before grabbing the zlib
		   mutex; odds are that the subsequent blocks are
		   going to come in in short order so we don't hold
		   the zlib mutex longer than necessary. */

		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
			printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
			       fpage, xpage, csize);
			goto b_eio;
		}
		stream.next_in  = bh->b_data + (cstart & bufmask);
		stream.avail_in = min(bufsize-(cstart & bufmask), csize);
		csize -= stream.avail_in;

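		/* The preallocated workspace is shared, so decompression is
		   serialized by zisofs_zlib_lock. */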
		stream.workspace = zisofs_zlib_workspace;
		mutex_lock(&zisofs_zlib_lock);

		zerr = zlib_inflateInit(&stream);
		if ( zerr != Z_OK ) {
			if ( err && zerr == Z_MEM_ERROR )
				err = -ENOMEM;
			printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
			       zerr);
			goto z_eio;
		}

		while ( !bail && fpage < maxpage ) {
			page = pages[fpage];
			if ( page )
				stream.next_out = page_address(page);
			else
				stream.next_out = (void *)&zisofs_sink_page;
			stream.avail_out = PAGE_CACHE_SIZE;

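			/* Inflate until this page (or the sink page) is full,
			   refilling the input from the next buffer as needed. */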
			while ( stream.avail_out ) {
				int ao, ai;
				if ( stream.avail_in == 0 && left_out ) {
					if ( !csize ) {
						printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
						bail = 1;
						break;
					} else {
						bh = *bhptr++;
						if ( !bh ||
						     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
							/* Reached an EIO */
							printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
							       fpage, xpage, csize);

							bail = 1;
							break;
						}
						stream.next_in = bh->b_data;
						stream.avail_in = min(csize,bufsize);
						csize -= stream.avail_in;
					}
				}
				ao = stream.avail_out;  ai = stream.avail_in;
				zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
				left_out = stream.avail_out;
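				/* Z_BUF_ERROR with the input exhausted just means more
				   input is needed; loop back and refill avail_in. */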
				if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
					continue;
				if ( zerr != Z_OK ) {
					/* EOF, error, or trying to read beyond end of input */
					if ( err && zerr == Z_MEM_ERROR )
						err = -ENOMEM;
					if ( zerr != Z_STREAM_END )
						printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
						       zerr, inode->i_ino, index,
						       fpage, xpage,
						       stream.avail_in, stream.avail_out,
						       ai, ao);
					bail = 1;
					break;
				}
			}

			if ( stream.avail_out && zerr == Z_STREAM_END ) {
				/* Fractional page written before EOF.  This may
				   be the last page in the file. */
				memset(stream.next_out, 0, stream.avail_out);
				stream.avail_out = 0;
			}

			if ( !stream.avail_out ) {
				/* This page completed */
				if ( page ) {
					flush_dcache_page(page);
					SetPageUptodate(page);
					kunmap(page);
					unlock_page(page);
					if ( fpage == xpage )
						err = 0; /* The critical page */
					else
						page_cache_release(page);
				}
				fpage++;
			}
		}
		zlib_inflateEnd(&stream);

	z_eio:
		mutex_unlock(&zisofs_zlib_lock);

	b_eio:
		for ( i = 0 ; i < haveblocks ; i++ ) {
			if ( bhs[i] )
				brelse(bhs[i]);
		}
	}

eio:

	/* Release any residual pages, do not SetPageUptodate */
	while ( fpage < maxpage ) {
		page = pages[fpage];
		if ( page ) {
			flush_dcache_page(page);
			if ( fpage == xpage )
				SetPageError(page);
			kunmap(page);
			unlock_page(page);
			if ( fpage != xpage )
				page_cache_release(page);
		}
		fpage++;
	}

	/* At this point, err contains 0 or -EIO depending on the "critical" page */
	return err;
}

const struct address_space_operations zisofs_aops = {
	.readpage = zisofs_readpage,
	/* No sync_page operation supported? */
	/* No bmap operation supported */
};

int __init zisofs_init(void)
{
	zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
	if ( !zisofs_zlib_workspace )
		return -ENOMEM;

	return 0;
}

void zisofs_cleanup(void)
{
	vfree(zisofs_zlib_workspace);
}
344