/*
 *  linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */

#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"

/*================ File-local functions ================*/

/*
 * build_key
 */
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
	key->key_len = 7;
	key->ext.FkType = type;
	key->ext.FNum = cpu_to_be32(cnid);
	key->ext.FABN = cpu_to_be16(block);
}

/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree.  In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned shorts).
 * Input Variable(s):
 *   const btree_key *key1: pointer to the first key to compare
 *   const btree_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (btree_key)s.
 * Postconditions:
 *   This function has no side-effects */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
	__be32 fnum1, fnum2;
	__be16 block1, block2;

	fnum1 = key1->ext.FNum;
	fnum2 = key2->ext.FNum;
	if (fnum1 != fnum2)
		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
	if (key1->ext.FkType != key2->ext.FkType)
		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;

	block1 = key1->ext.FABN;
	block2 = key2->ext.FABN;
	if (block1 == block2)
		return 0;
	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}

/*
 * hfs_ext_find_block
 *
 * Find a block within an extent record
 */
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
	int i;
	u16 count;

	for (i = 0; i < 3; ext++, i++) {
		count = be16_to_cpu(ext->count);
		if (off < count)
			return be16_to_cpu(ext->block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

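/*
 * hfs_ext_block_count
 *
 * Count the total number of allocation blocks covered by an extent record
 */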
static int hfs_ext_block_count(struct hfs_extent *ext)
{
	int i;
	u16 count = 0;

	for (i = 0; i < 3; ext++, i++)
		count += be16_to_cpu(ext->count);
	return count;
}

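/*
 * hfs_ext_lastblock
 *
 * Return the allocation block number just past the last block described
 * by an extent record (used as the goal for further allocations)
 */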
static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
	int i;

	ext += 2;
	for (i = 0; i < 2; ext--, i++)
		if (ext->count)
			break;
	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}

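/*
 * __hfs_ext_write_extent
 *
 * Write the inode's cached extent record back to the extents B-tree,
 * inserting a new record if it is not on disk yet; clears the dirty flags
 */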
static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
			  HFS_IS_RSRC(inode) ?  HFS_FK_RSRC : HFS_FK_DATA);
	res = hfs_brec_find(fd);
	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
	}
	return 0;
}

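/*
 * hfs_ext_write_extent
 *
 * Flush the inode's cached extent record to the extents B-tree if dirty
 */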
int hfs_ext_write_extent(struct inode *inode)
{
	struct hfs_find_data fd;
	int res = 0;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfs_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}

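/*
 * __hfs_ext_read_extent
 *
 * Look up the extent record covering 'block' of the given file and fork
 * and copy it into 'extent'
 */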
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}

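/*
 * __hfs_ext_cache_extent
 *
 * Write back a dirty cached extent record, then read and cache the
 * extent record covering 'block'
 */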
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		res = __hfs_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}

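/*
 * hfs_ext_read_extent
 *
 * Make sure the extent record covering 'block' is cached in the in-core
 * inode, looking it up in the extents B-tree if necessary
 */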
static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFS_I(inode)->cached_start &&
	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
		return 0;

	res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfs_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

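/*
 * hfs_dump_extent
 *
 * Dump an extent record as block:count triples to the debug log
 */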
static void hfs_dump_extent(struct hfs_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, "   ");
	for (i = 0; i < 3; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be16_to_cpu(extent[i].block),
			     be16_to_cpu(extent[i].count));
	hfs_dbg_cont(EXTENT, "\n");
}

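/*
 * hfs_add_extent
 *
 * Append 'block_count' allocation blocks starting at 'alloc_block' to an
 * extent record whose current length is 'offset' blocks; returns -ENOSPC
 * if all three extents of the record are already in use
 */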
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

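/*
 * hfs_free_extents
 *
 * Free the last 'block_nr' of the first 'offset' allocation blocks
 * described by an extent record, clearing the corresponding bitmap bits
 */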
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}

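/*
 * hfs_free_fork
 *
 * Free all allocation blocks of a fork: first those in the catalog
 * record's own extent record, then any overflow records in the
 * extents B-tree
 */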
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
	struct hfs_find_data fd;
	u32 total_blocks, blocks, start;
	u32 cnid = be32_to_cpu(file->FlNum);
	struct hfs_extent *extent;
	int res, i;

	if (type == HFS_FK_DATA) {
		total_blocks = be32_to_cpu(file->PyLen);
		extent = file->ExtRec;
	} else {
		total_blocks = be32_to_cpu(file->RPyLen);
		extent = file->RExtRec;
	}
	total_blocks /= HFS_SB(sb)->alloc_blksz;
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 3; i++)
		blocks += be16_to_cpu(extent[i].count);

	res = hfs_free_extents(sb, extent, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
		if (res)
			break;
		start = be16_to_cpu(fd.key->ext.FABN);
		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

/*
 * hfs_get_block
 *
 * Map a block of a file to a disk block, allocating a new block when
 * 'create' is set (get_block callback used by the page cache I/O paths)
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;
	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		if (block > HFS_I(inode)->fs_blocks || !create)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
	       dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}

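/*
 * hfs_extend_file
 *
 * Allocate more blocks for a file: extend the last extent, add a new
 * extent to the current record, or start a new extent record as needed
 */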
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
	}
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfs_ext_write_extent(inode);
	if (res)
		goto out;

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}

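/*
 * hfs_file_truncate
 *
 * Bring the on-disk allocation in line with a changed i_size: expand the
 * file through the page cache when growing, or free allocation blocks and
 * surplus extent records when shrinking
 */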
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
		inode->i_ino, (long long)HFS_I(inode)->phys_size,
		inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
					page, fsdata);
		}
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;
	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		/* XXX: We lack error handling of hfs_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}