• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 *  linux/fs/hpfs/alloc.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  HPFS bitmap operations
 */

#include "hpfs_fn.h"
10 
/*
 * Decrease the cached free-sector count after an allocation.
 * A count of (unsigned)-1 means "unknown" and is left untouched;
 * an underflow marks the count unknown and logs a filesystem error.
 */
static void hpfs_claim_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free == (unsigned)-1)
		return;
	if (unlikely(!sbi->sb_n_free)) {
		hpfs_error(s, "free count underflow, allocating sector %08x", sec);
		sbi->sb_n_free = -1;
		return;
	}
	sbi->sb_n_free--;
}
23 
/*
 * Increase the cached free-sector count after freeing a sector.
 * A count of (unsigned)-1 means "unknown" and is left untouched;
 * exceeding the filesystem size marks the count unknown and logs an error.
 */
static void hpfs_claim_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free == (unsigned)-1)
		return;
	if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
		hpfs_error(s, "free count overflow, freeing sector %08x", sec);
		sbi->sb_n_free = -1;
		return;
	}
	sbi->sb_n_free++;
}
36 
/*
 * Decrease the cached count of free dnodes after allocating one in the
 * directory band.  (unsigned)-1 means the count is unknown.
 */
static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free_dnodes == (unsigned)-1)
		return;
	if (unlikely(!sbi->sb_n_free_dnodes)) {
		hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
		sbi->sb_n_free_dnodes = -1;
		return;
	}
	sbi->sb_n_free_dnodes--;
}
49 
/*
 * Increase the cached count of free dnodes after freeing one in the
 * directory band.  (unsigned)-1 means the count is unknown; the band
 * holds one dnode per 4 sectors, hence the sb_dirband_size / 4 bound.
 */
static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);

	if (sbi->sb_n_free_dnodes == (unsigned)-1)
		return;
	if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
		hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
		sbi->sb_n_free_dnodes = -1;
		return;
	}
	sbi->sb_n_free_dnodes++;
}
62 
63 /*
64  * Check if a sector is allocated in bitmap
65  * This is really slow. Turned on only if chk==2
66  */
67 
/*
 * Verify that 'sec' is marked allocated (bit clear) in its sector bitmap,
 * and — if it lies inside the directory band — in the dnode bitmap too.
 * Returns 0 when consistent, 1 on error (which is also reported via
 * hpfs_error).
 */
static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;

	bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk");
	if (!bmp)
		return 1;
	/* a set bit means "free", so a set bit here is an inconsistency */
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
		hpfs_brelse4(&qbh);
		return 1;
	}
	hpfs_brelse4(&qbh);

	if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
		/* dnode bitmap tracks the band at 4 sectors per bit */
		unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;

		bmp = hpfs_map_dnode_bitmap(s, &qbh);
		if (!bmp)
			return 1;
		if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
			hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
			hpfs_brelse4(&qbh);
			return 1;
		}
		hpfs_brelse4(&qbh);
	}
	return 0;
}
93 
94 /*
95  * Check if sector(s) have proper number and additionally check if they're
96  * allocated in bitmap.
97  */
98 
int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
{
	secno end = start + len;

	/* end < start catches wrap-around of the unsigned addition;
	   sectors below 0x12 are reserved */
	if (end < start || start < 0x12 || end > hpfs_sb(s)->sb_fs_size) {
		hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start);
		return 1;
	}
	/* the slow per-sector bitmap check only runs with sb_chk >= 2 */
	if (hpfs_sb(s)->sb_chk >= 2) {
		int i;

		for (i = 0; i < len; i++) {
			if (chk_if_allocated(s, start + i, msg))
				return 1;
		}
	}
	return 0;
}
113 
/*
 * Allocate a run of n (1 or 4) sectors within a single bitmap, trying to
 * place it near the bit corresponding to 'near' and requiring 'forward'
 * additional free bits after the run (for preallocation).  A set bit means
 * "free"; allocated bits are clear.  Returns the sector number, or 0 on
 * failure.
 */
static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned bs = near & ~0x3fff;		/* base sector of this bitmap (0x4000 sectors each) */
	unsigned nr = (near & 0x3fff) & ~(n - 1);	/* hinted bit offset, aligned down to n */
	/*unsigned mnr;*/
	unsigned i, q;
	int a, b;
	secno ret = 0;
	if (n != 1 && n != 4) {
		hpfs_error(s, "Bad allocation size: %d", n);
		return 0;
	}
	/* bs == ~0x3fff is the magic value alloc_in_dirband() passes to select
	   the dnode bitmap instead of a regular sector bitmap */
	if (bs != ~0x3fff) {
		if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
	} else {
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls;
	}
	/* fast path: the hinted position itself is free */
	if (!tstbits(bmp, nr, n + forward)) {
		ret = bs + nr;
		goto rt;
	}
	/* scan near the hint: forward within the hint's 32-bit word, then wrap
	   back to the word's start (b flags the wrap); tstbits()'s nonzero
	   return is used as the distance to skip ahead */
	q = nr + n; b = 0;
	while ((a = tstbits(bmp, q, n + forward)) != 0) {
		q += a;
		if (n != 1) q = ((q-1)&~(n-1))+n;	/* realign to n */
		if (!b) {
			if (q>>5 != nr>>5) {
				b = 1;
				q = nr & 0x1f;
			}
		} else if (q > nr) break;
	}
	if (!a) {
		ret = bs + q;
		goto rt;
	}
	/* no luck near the hint: walk all 512 words of the bitmap, starting at
	   the hint's word and wrapping around */
	nr >>= 5;
	/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
	i = nr;
	do {
		if (!le32_to_cpu(bmp[i])) goto cont;	/* word fully allocated */
		/* large requests need a fully-free word to be worth scanning */
		if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
		q = i<<5;
		/* back q up over trailing free bits of the previous word so runs
		   spanning a word boundary are found */
		if (i > 0) {
			unsigned k = le32_to_cpu(bmp[i-1]);
			while (k & 0x80000000) {
				q--; k <<= 1;
			}
		}
		if (n != 1) q = ((q-1)&~(n-1))+n;
		while ((a = tstbits(bmp, q, n + forward)) != 0) {
			q += a;
			if (n != 1) q = ((q-1)&~(n-1))+n;
			if (q>>5 > i) break;	/* moved past this word; try the next */
		}
		if (!a) {
			ret = bs + q;
			goto rt;
		}
		cont:
		i++, i &= 0x1ff;
	} while (i != nr);
	rt:
	if (ret) {
		/* sb_chk paranoia: the chosen run must lie in this bitmap and all
		   n of its bits must really be free */
		if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
			hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
			ret = 0;
			goto b;
		}
		/* clear the n bits to mark them allocated */
		bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
		hpfs_mark_4buffers_dirty(&qbh);
	}
	b:
	hpfs_brelse4(&qbh);
	uls:
	return ret;
}
193 
194 /*
195  * Allocation strategy:	1) search place near the sector specified
196  *			2) search bitmap where free sectors last found
197  *			3) search all bitmaps
198  *			4) search all bitmaps ignoring number of pre-allocated
199  *				sectors
200  */
201 
/*
 * Allocate n sectors, preferably near 'near', with 'forward' sectors of
 * preallocation after them.  A negative 'forward' forces the preallocation:
 * the whole allocation fails unless the forward sectors can be taken too.
 * Returns the first allocated sector, or 0 on failure.
 */
secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
{
	secno sec;
	int i;
	unsigned n_bmps;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	int f_p = 0;	/* forced preallocation (forward was negative) */
	int near_bmp;
	if (forward < 0) {
		forward = -forward;
		f_p = 1;
	}
	n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;	/* bitmaps cover 0x4000 sectors each */
	/* strategy 1: try the bitmap containing the hint */
	if (near && near < sbi->sb_fs_size) {
		if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
		near_bmp = near >> 14;
	} else near_bmp = n_bmps / 2;
	/*
	if (b != -1) {
		if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
			b &= 0x0fffffff;
			goto ret;
		}
		if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
	*/
	if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc;
	less_fwd:
	/* strategies 2+3: search outward from near_bmp over all bitmaps */
	for (i = 0; i < n_bmps; i++) {
		if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) {
			sbi->sb_c_bitmap = near_bmp+i;	/* remember where free space was found */
			goto ret;
		}
		if (!forward) {
			/* no preallocation: also walk backwards from the hint */
			if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp-i-1;
				goto ret;
			}
		} else {
			/* with preallocation: wrap around past the last bitmap instead */
			if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp+i-n_bmps;
				goto ret;
			}
		}
		/* early on, also try the bitmap where free sectors were last found */
		if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) {
			goto ret;
		}
	}
	/* strategy 4: nothing found — shrink the preallocation request and retry
	   (also lowers the global cap so future callers ask for less) */
	if (!f_p) {
		if (forward) {
			sbi->sb_max_fwd_alloc = forward * 3 / 4;
			forward /= 2;
			goto less_fwd;
		}
	}
	sec = 0;
	ret:
	if (sec) {
		/* account every allocated sector in the cached free count */
		i = 0;
		do
			hpfs_claim_alloc(s, sec + i);
		while (unlikely(++i < n));
	}
	if (sec && f_p) {
		/* forced preallocation: grab the forward sectors too, or fail the
		   whole allocation */
		for (i = 0; i < forward; i++) {
			if (!hpfs_alloc_if_possible(s, sec + n + i)) {
				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
				sec = 0;
				break;
			}
		}
	}
	return sec;
}
275 
/*
 * Allocate one dnode (4 sectors) in the directory band, near 'near'.
 * Returns the first sector of the dnode, or 0 if the band is full.
 */
static secno alloc_in_dirband(struct super_block *s, secno near)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	unsigned nr = near;
	secno sec;

	/* clamp the hint into the band, then convert to a dnode bit index
	   (one bit per 4 sectors) */
	if (nr < sbi->sb_dirband_start)
		nr = sbi->sb_dirband_start;
	if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size)
		nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
	nr = (nr - sbi->sb_dirband_start) >> 2;

	/* ~0x3fff in the high bits tells alloc_in_bmp() to use the dnode bitmap */
	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
	if (!sec)
		return 0;
	hpfs_claim_dirband_alloc(s, sec);
	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}
292 
293 /* Alloc sector if it's free */
294 
/*
 * Allocate the specific sector 'sec' if it is currently free.
 * Returns 1 on success, 0 if the sector was already allocated or the
 * bitmap could not be mapped.
 */
int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned word = (sec & 0x3fff) >> 5;
	unsigned bit = sec & 0x1f;

	bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip");
	if (!bmp)
		return 0;
	/* a set bit means "free" */
	if (le32_to_cpu(bmp[word]) & (1 << bit)) {
		bmp[word] &= cpu_to_le32(~(1 << bit));
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		hpfs_claim_alloc(s, sec);
		return 1;
	}
	hpfs_brelse4(&qbh);
	return 0;
}
311 
312 /* Free sectors in bitmaps */
313 
/*
 * Free n consecutive sectors starting at 'sec', setting their bits in the
 * sector bitmaps (a set bit means "free").  Moves to the next bitmap when
 * the run crosses a 0x4000-sector boundary.
 */
void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	/*pr_info("2 - ");*/
	if (!n) return;
	if (sec < 0x12) {
		hpfs_error(s, "Trying to free reserved sector %08x", sec);
		return;
	}
	/* freeing space relaxes the forward-preallocation cap (bounded) */
	sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
	if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
	new_map:
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
		return;
	}
	new_tst:
	/* the bit must currently be clear (allocated); a set bit would mean
	   we are freeing a sector that is already free */
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
		hpfs_error(s, "sector %08x not allocated", sec);
		hpfs_brelse4(&qbh);
		return;
	}
	bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
	hpfs_claim_free(s, sec);
	if (!--n) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		return;
	}
	/* crossed into the next bitmap: flush this one and map the next */
	if (!(++sec & 0x3fff)) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		goto new_map;
	}
	goto new_tst;
}
351 
352 /*
353  * Check if there are at least n free dnodes on the filesystem.
354  * Called before adding to dnode. If we run out of space while
355  * splitting dnodes, it would corrupt dnode tree.
356  */
357 
hpfs_check_free_dnodes(struct super_block * s,int n)358 int hpfs_check_free_dnodes(struct super_block *s, int n)
359 {
360 	int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
361 	int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
362 	int i, j;
363 	__le32 *bmp;
364 	struct quad_buffer_head qbh;
365 	if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
366 		for (j = 0; j < 512; j++) {
367 			unsigned k;
368 			if (!le32_to_cpu(bmp[j])) continue;
369 			for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
370 				hpfs_brelse4(&qbh);
371 				return 0;
372 			}
373 		}
374 	}
375 	hpfs_brelse4(&qbh);
376 	i = 0;
377 	if (hpfs_sb(s)->sb_c_bitmap != -1) {
378 		bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1");
379 		goto chk_bmp;
380 	}
381 	chk_next:
382 	if (i == b) i++;
383 	if (i >= n_bmps) return 1;
384 	bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2");
385 	chk_bmp:
386 	if (bmp) {
387 		for (j = 0; j < 512; j++) {
388 			u32 k;
389 			if (!le32_to_cpu(bmp[j])) continue;
390 			for (k = 0xf; k; k <<= 4)
391 				if ((le32_to_cpu(bmp[j]) & k) == k) {
392 					if (!--n) {
393 						hpfs_brelse4(&qbh);
394 						return 0;
395 					}
396 				}
397 		}
398 		hpfs_brelse4(&qbh);
399 	}
400 	i++;
401 	goto chk_next;
402 }
403 
/*
 * Free a dnode: outside the directory band its 4 sectors go back to the
 * regular bitmaps, inside the band its single bit is set in the dnode
 * bitmap (a set bit means "free").
 */
void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned ssec;

	/* dnodes are 4-sector aligned */
	if (sbi->sb_chk && (dno & 3)) {
		hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno);
		return;
	}
	if (dno < sbi->sb_dirband_start ||
	    dno >= sbi->sb_dirband_start + sbi->sb_dirband_size) {
		hpfs_free_sectors(s, dno, 4);
		return;
	}
	ssec = (dno - sbi->sb_dirband_start) / 4;
	bmp = hpfs_map_dnode_bitmap(s, &qbh);
	if (!bmp)
		return;
	bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
	hpfs_mark_4buffers_dirty(&qbh);
	hpfs_brelse4(&qbh);
	hpfs_claim_dirband_free(s, dno);
}
426 
/*
 * Allocate and initialize a new empty dnode.  While the directory band has
 * plenty of room it is preferred; otherwise regular sectors are tried
 * first.  Either way the other source is used as a fallback.  *dno receives
 * the dnode's sector number; returns the mapped dnode or NULL on failure.
 */
struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
			 dnode_secno *dno, struct quad_buffer_head *qbh)
{
	struct dnode *d;

	if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
		*dno = alloc_in_dirband(s, near);
		if (!*dno)
			*dno = hpfs_alloc_sector(s, near, 4, 0);
	} else {
		*dno = hpfs_alloc_sector(s, near, 4, 0);
		if (!*dno)
			*dno = alloc_in_dirband(s, near);
	}
	if (!*dno)
		return NULL;
	d = hpfs_get_4sectors(s, *dno, qbh);
	if (!d) {
		hpfs_free_dnode(s, *dno);
		return NULL;
	}
	/* set up an empty dnode containing only the terminating dirent */
	memset(d, 0, 2048);
	d->magic = cpu_to_le32(DNODE_MAGIC);
	d->first_free = cpu_to_le32(52);
	d->dirent[0] = 32;
	d->dirent[2] = 8;
	d->dirent[30] = 1;
	d->dirent[31] = 255;
	d->self = cpu_to_le32(*dno);
	return d;
}
452 
/*
 * Allocate and initialize a new empty fnode near 'near'.  *fno receives
 * the sector number; returns the mapped fnode or NULL on failure.
 */
struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno,
			  struct buffer_head **bh)
{
	struct fnode *f;

	*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD);
	if (!*fno)
		return NULL;
	f = hpfs_get_sector(s, *fno, bh);
	if (!f) {
		/* give the sector back if we cannot map it */
		hpfs_free_sectors(s, *fno, 1);
		return NULL;
	}
	memset(f, 0, 512);
	f->magic = cpu_to_le32(FNODE_MAGIC);
	f->ea_offs = cpu_to_le16(0xc4);
	f->btree.n_free_nodes = 8;
	f->btree.first_free = cpu_to_le16(8);
	return f;
}
469 
/*
 * Allocate and initialize a new empty anode near 'near'.  *ano receives
 * the sector number; returns the mapped anode or NULL on failure.
 */
struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano,
			  struct buffer_head **bh)
{
	struct anode *a;

	*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD);
	if (!*ano)
		return NULL;
	a = hpfs_get_sector(s, *ano, bh);
	if (!a) {
		/* give the sector back if we cannot map it */
		hpfs_free_sectors(s, *ano, 1);
		return NULL;
	}
	memset(a, 0, 512);
	a->magic = cpu_to_le32(ANODE_MAGIC);
	a->self = cpu_to_le32(*ano);
	a->btree.n_free_nodes = 40;
	a->btree.n_used_nodes = 0;
	a->btree.first_free = cpu_to_le16(8);
	return a;
}
487 
/*
 * Find the next run of free (set) bits in a bitmap, starting the search at
 * *idx.  On return *idx is the start of the run and the run length is
 * returned; 0 means no free bits remain before the 0x4000-bit end.
 */
static unsigned find_run(__le32 *bmp, unsigned *idx)
{
	unsigned len = 1;

	/* skip allocated (clear) bits up to the first free one */
	while (tstbits(bmp, *idx, 1)) {
		if (unlikely(++(*idx) >= 0x4000))
			return 0;
	}
	/* measure the run; tstbits() is assumed to report the bitmap end as
	   allocated, terminating this scan — see its definition in hpfs_fn.h */
	while (!tstbits(bmp, *idx + len, 1))
		len++;
	return len;
}
501 
/*
 * Issue a discard for the part of [start, start+len) that falls inside
 * [limit_start, limit_end), provided it is at least minlen sectors long.
 * Adds the discarded length to *result.  Returns 0 on success or when
 * nothing qualifies, a negative errno on failure or a pending fatal signal.
 */
static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
{
	secno end = start + len;
	int err;

	if (fatal_signal_pending(current))
		return -EINTR;
	/* clip the range to the caller's limits */
	if (start < limit_start)
		start = limit_start;
	if (end > limit_end)
		end = limit_end;
	if (start >= end || end - start < minlen)
		return 0;
	err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
	if (err)
		return err;
	*result += end - start;
	return 0;
}
523 
/*
 * Discard (trim) all free ranges of at least 'minlen' sectors that overlap
 * [start, end).  The total number of discarded sectors is returned via
 * *result.  The filesystem lock is taken per bitmap and dropped in between
 * so other operations are not stalled for the whole scan.
 */
int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
{
	int err = 0;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	unsigned idx, len, start_bmp, end_bmp;
	__le32 *bmp;
	struct quad_buffer_head qbh;

	*result = 0;
	if (!end || end > sbi->sb_fs_size)
		end = sbi->sb_fs_size;
	if (start >= sbi->sb_fs_size)
		return 0;
	/* one bitmap covers 0x4000 sectors; longer runs cannot exist */
	if (minlen > 0x4000)
		return 0;
	/* first the directory band: its free space is tracked in the separate
	   dnode bitmap at 4 sectors per bit, hence the * 4 scaling below */
	if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
		hpfs_lock(s);
		if (s->s_flags & MS_RDONLY) {
			err = -EROFS;
			goto unlock_1;
		}
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
			err = -EIO;
			goto unlock_1;
		}
		idx = 0;
		while ((len = find_run(bmp, &idx)) && !err) {
			err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
			idx += len;
		}
		hpfs_brelse4(&qbh);
unlock_1:
		hpfs_unlock(s);
	}
	/* then every regular bitmap overlapping [start, end) */
	start_bmp = start >> 14;
	end_bmp = (end + 0x3fff) >> 14;
	while (start_bmp < end_bmp && !err) {
		hpfs_lock(s);
		/* re-checked per bitmap: the fs may have been remounted read-only
		   while the lock was dropped */
		if (s->s_flags & MS_RDONLY) {
			err = -EROFS;
			goto unlock_2;
		}
		if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
			err = -EIO;
			goto unlock_2;
		}
		idx = 0;
		while ((len = find_run(bmp, &idx)) && !err) {
			err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
			idx += len;
		}
		hpfs_brelse4(&qbh);
unlock_2:
		hpfs_unlock(s);
		start_bmp++;
	}
	return err;
}
582