// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

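/* Number of allocation-bitmap bits held by one page of the allocation file. */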
#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)

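/*
 * hfsplus_block_allocate() - find and allocate a run of free blocks.
 *
 * Search the allocation file from bit @offset for the first clear bit and
 * set up to *@max consecutive bits starting there, stopping early at the
 * next already-set bit or at @size (the number of valid bits in the bitmap).
 * Returns the first allocated block number and stores the length of the run
 * back through @max; returns @size when nothing could be allocated (bitmap
 * full, zero-length request, or a page read error).
 */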
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
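	/*
	 * Limit the scan in this page: the whole page if the bitmap extends
	 * past it, otherwise only up to the u32 holding the last valid bit.
	 */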
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
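	/*
	 * curr/i now identify the first free bit; 'start' is its block
	 * number.  Mark up to 'len' bits, stopping early at the next
	 * already-set bit.
	 */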
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
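	/*
	 * Write back the last u32 touched and report the number of blocks
	 * actually allocated through *max.
	 */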
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}

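/*
 * hfsplus_block_free() - mark a run of blocks as free again.
 *
 * Clear @count bits in the allocation file starting at bit @offset and add
 * them back to the free-block count.  Returns 0 on success, -ENOENT if the
 * range lies beyond the volume, or -EIO if a bitmap page cannot be read.
 */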
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
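	/* 'count' is consumed below; keep the full length for free_blocks */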
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
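		/*
		 * Preserve the i bits before the freed range; if the range
		 * ends inside this u32, also preserve the bits after it.
		 */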
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}
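
/*
 * Illustrative sketch (not taken from this file): a typical caller pairs the
 * two helpers like this, where 'want' and 'hint' are placeholder names for
 * the desired run length and the bit to start searching from.
 *
 *	u32 len = want;
 *	u32 start = hfsplus_block_allocate(sb, sbi->total_blocks, hint, &len);
 *	if (start >= sbi->total_blocks)
 *		return -ENOSPC;		(bitmap full or page read error)
 *	... use blocks [start, start + len) ...
 *	hfsplus_block_free(sb, start, len);
 */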