/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside the device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

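	/*
	 * The range may cross a cylinder group boundary; in that case the
	 * tail is clipped off here and freed by another pass through this
	 * loop (see the 'overflow' handling at the bottom).
	 */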
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that the blocks with b_blocknr equal to
 * oldb...oldb+count-1 become the blocks with b_blocknr equal to
 * newb...newb+count-1; we assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page is
 * an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page) /* it was truncated */
				continue;
			if (IS_ERR(page)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

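/*
 * Zero-fill the n fragments starting at fragment 'beg' through the buffer
 * cache, so that freshly allocated fragments never expose stale on-disk
 * data; the buffers are written synchronously when the inode is sync or
 * 'sync' is set.
 */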
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

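/*
 * Allocate 'count' fragments for the block addressed by the data pointer
 * 'p', preferably near 'goal'.  Depending on what is already there, this
 * either allocates a fresh run of fragments, extends the existing tail
 * fragment in place, or allocates a new block and moves the old contents
 * over (ufs_change_blocknr).  Returns the first fragment number, or 0
 * (INVBLOCK on internal inconsistency) on failure with *err set.
 */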
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning, "
			    "fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate a new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			write_sequnlock(&UFS_I(inode)->meta_lock);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize the block in place
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate a new block and move the data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

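/*
 * Tentatively charge 'frags' fragments to the inode's block count;
 * back the change out and fail if i_blocks would overflow 32 bits.
 */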
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);

	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

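/*
 * Try to extend an existing allocation in place from 'oldcount' to
 * 'newcount' fragments by claiming the free fragments that directly
 * follow it within the same block.  Returns 'fragment' on success,
 * 0 if the needed fragments are not free.
 */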
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

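/*
 * Check whether cylinder group 'cgno' can satisfy the request: jump to
 * cg_found if it has a free block, or a free run of at least 'count'
 * fragments.
 */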
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

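/*
 * Allocate 'count' fragments, preferably in cylinder group 'cgno' near
 * 'goal'.  The cylinder group is chosen with the classic three-step
 * strategy below: preferred group, quadratic rehash, then linear scan.
 */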
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search in the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at offset 2 (offset 0 was checked in step 1,
	 * offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

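/*
 * Allocate a whole block in the given cylinder group, at 'goal' if that
 * block is free, otherwise at the next free block found from the group's
 * rotor.  Returns the block's first fragment, INVBLOCK if no block is
 * free, or 0 if the inode's block count would overflow.
 */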
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

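/*
 * Scan 'size' bytes of the cylinder group bitmap starting at 'begin' for
 * a byte whose fragment pattern matches 'mask' in the given fragment
 * table.  Returns the number of bytes left unscanned, i.e. non-zero on a
 * hit and 0 when nothing was found.
 */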
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a run of free fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: block near which we want to find the new one
 * @count: requested size (in fragments)
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

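/*
 * Maintain the cluster summary counts used for contiguous-block
 * allocation: set or clear the cluster-map bit for 'blkno' (cnt > 0
 * frees the block, cnt < 0 allocates it) and move the counts of the
 * clusters that are merged or split as a result.
 */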
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

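/*
 * Fragment run table for 8 fragments per block: entry i has bit (n - 1)
 * set for every run of exactly n free fragments in bitmap byte i
 * (e.g. 0x0f -> run of 4 -> 0x08).
 */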
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

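/*
 * Equivalent table for 1, 2 and 4 fragments per block, with each size's
 * run lengths offset by s_fpb bits to match the mask
 * 1 << (count - 1 + (s_fpb & 7)) used in ufs_bitmap_search().
 */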
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};