// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Miao Xie <miaoxie@huawei.com>
 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <stdlib.h>
#include <erofs/cache.h>
#include "erofs/io.h"
#include "erofs/print.h"

static struct erofs_buffer_block blkh = {
	.list = LIST_HEAD_INIT(blkh.list),
	.blkaddr = NULL_ADDR,
};
static erofs_blk_t tail_blkaddr;

/* buckets for all mapped buffer blocks to speed up allocation */
static struct list_head mapped_buckets[META + 1][EROFS_BLKSIZ];
/* last mapped buffer block to accelerate erofs_mapbh() */
static struct erofs_buffer_block *last_mapped_block = &blkh;

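/* flush hook that simply detaches the buffer head without writing it out */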
static bool erofs_bh_flush_drop_directly(struct erofs_buffer_head *bh)
{
	return erofs_bh_flush_generic_end(bh);
}

struct erofs_bhops erofs_drop_directly_bhops = {
	.flush = erofs_bh_flush_drop_directly,
};

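/*
 * flush hook that always reports the buffer head as unflushed so that its
 * buffer block survives erofs_bflush() (used for the superblock bh below)
 */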
static bool erofs_bh_flush_skip_write(struct erofs_buffer_head *bh)
{
	return false;
}

struct erofs_bhops erofs_skip_write_bhops = {
	.flush = erofs_bh_flush_skip_write,
};

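/* write the data covered by bh (up to the next bh's offset) to the device */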
int erofs_bh_flush_generic_write(struct erofs_buffer_head *bh, void *buf)
{
	struct erofs_buffer_head *nbh = list_next_entry(bh, list);
	erofs_off_t offset = erofs_btell(bh, false);

	DBG_BUGON(nbh->off < bh->off);
	return dev_write(buf, offset, nbh->off - bh->off);
}

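/* write back the malloc()ed buffer hung off bh->fsprivate, then release both */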
static bool erofs_bh_flush_buf_write(struct erofs_buffer_head *bh)
{
	int err = erofs_bh_flush_generic_write(bh, bh->fsprivate);

	if (err)
		return false;
	free(bh->fsprivate);
	return erofs_bh_flush_generic_end(bh);
}

struct erofs_bhops erofs_buf_write_bhops = {
	.flush = erofs_bh_flush_buf_write,
};

/* return buffer_head of erofs super block (with size 0) */
struct erofs_buffer_head *erofs_buffer_init(void)
{
	int i, j;
	struct erofs_buffer_head *bh = erofs_balloc(META, 0, 0, 0);

	if (IS_ERR(bh))
		return bh;

	bh->op = &erofs_skip_write_bhops;

	for (i = 0; i < ARRAY_SIZE(mapped_buckets); i++)
		for (j = 0; j < ARRAY_SIZE(mapped_buckets[0]); j++)
			init_list_head(&mapped_buckets[i][j]);
	return bh;
}

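/*
 * (re)hash a mapped buffer block into the bucket keyed by its type and the
 * number of bytes used within its last block
 */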
static void erofs_bupdate_mapped(struct erofs_buffer_block *bb)
{
	struct list_head *bkt;

	if (bb->blkaddr == NULL_ADDR)
		return;

	bkt = mapped_buckets[bb->type] + bb->buffers.off % EROFS_BLKSIZ;
	list_del(&bb->mapped_list);
	list_add_tail(&bb->mapped_list, bkt);
}

/* return the occupied bytes in the specific buffer block if successful */
static int __erofs_battach(struct erofs_buffer_block *bb,
			   struct erofs_buffer_head *bh,
			   erofs_off_t incr,
			   unsigned int alignsize,
			   unsigned int extrasize,
			   bool dryrun)
{
	const erofs_off_t alignedoffset = roundup(bb->buffers.off, alignsize);
	const int oob = cmpsgn(roundup((bb->buffers.off - 1) % EROFS_BLKSIZ + 1,
				       alignsize) + incr + extrasize,
			       EROFS_BLKSIZ);
	bool tailupdate = false;
	erofs_blk_t blkaddr;

	if (oob >= 0) {
		/* the next buffer block should be NULL_ADDR all the time */
		if (oob && list_next_entry(bb, list)->blkaddr != NULL_ADDR)
			return -EINVAL;

		blkaddr = bb->blkaddr;
		if (blkaddr != NULL_ADDR) {
			tailupdate = (tail_blkaddr == blkaddr +
				      BLK_ROUND_UP(bb->buffers.off));
			if (oob && !tailupdate)
				return -EINVAL;
		}
	}

	if (!dryrun) {
		if (bh) {
			bh->off = alignedoffset;
			bh->block = bb;
			list_add_tail(&bh->list, &bb->buffers.list);
		}
		bb->buffers.off = alignedoffset + incr;
		/* need to update the tail_blkaddr */
		if (tailupdate)
			tail_blkaddr = blkaddr + BLK_ROUND_UP(bb->buffers.off);
		erofs_bupdate_mapped(bb);
	}
	return (alignedoffset + incr - 1) % EROFS_BLKSIZ + 1;
}

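/* expand the tail buffer head of its buffer block in place by incr bytes */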
int erofs_bh_balloon(struct erofs_buffer_head *bh, erofs_off_t incr)
{
	struct erofs_buffer_block *const bb = bh->block;

	/* should be the tail bh in the corresponding buffer block */
	if (bh->list.next != &bb->buffers.list)
		return -EINVAL;

	return __erofs_battach(bb, NULL, incr, 1, 0, false);
}

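/*
 * pick the buffer block that leaves the least leftover space for the new
 * allocation, first from the mapped buckets, then from the blocks that can
 * still be expanded; *bbp is set to NULL if a new block should be used
 */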
static int erofs_bfind_for_attach(int type, erofs_off_t size,
				  unsigned int required_ext,
				  unsigned int inline_ext,
				  unsigned int alignsize,
				  struct erofs_buffer_block **bbp)
{
	struct erofs_buffer_block *cur, *bb;
	unsigned int used0, used_before, usedmax, used;
	int ret;

	used0 = (size + required_ext) % EROFS_BLKSIZ + inline_ext;
	/* inline data should be in the same fs block */
	if (used0 > EROFS_BLKSIZ)
		return -ENOSPC;

	if (!used0 || alignsize == EROFS_BLKSIZ) {
		*bbp = NULL;
		return 0;
	}

	usedmax = 0;
	bb = NULL;

	/* try to find a most-fit mapped buffer block first */
	if (size + required_ext + inline_ext >= EROFS_BLKSIZ)
		goto skip_mapped;

	used_before = rounddown(EROFS_BLKSIZ -
				(size + required_ext + inline_ext), alignsize);
	for (; used_before; --used_before) {
		struct list_head *bt = mapped_buckets[type] + used_before;

		if (list_empty(bt))
			continue;
		cur = list_first_entry(bt, struct erofs_buffer_block,
				       mapped_list);

		/* the last mapped block can still be expanded, don't handle it here */
		if (list_next_entry(cur, list)->blkaddr == NULL_ADDR) {
			DBG_BUGON(cur != last_mapped_block);
			continue;
		}

		DBG_BUGON(cur->type != type);
		DBG_BUGON(cur->blkaddr == NULL_ADDR);
		DBG_BUGON(used_before != cur->buffers.off % EROFS_BLKSIZ);

		ret = __erofs_battach(cur, NULL, size, alignsize,
				      required_ext + inline_ext, true);
		if (ret < 0) {
			DBG_BUGON(1);
			continue;
		}

		/* should contain all data in the current block */
		used = ret + required_ext + inline_ext;
		DBG_BUGON(used > EROFS_BLKSIZ);

		bb = cur;
		usedmax = used;
		break;
	}

skip_mapped:
	/* try to start from the last mapped one, which can still be expanded */
	cur = last_mapped_block;
	if (cur == &blkh)
		cur = list_next_entry(cur, list);
	for (; cur != &blkh; cur = list_next_entry(cur, list)) {
		used_before = cur->buffers.off % EROFS_BLKSIZ;

		/* skip if buffer block is just full */
		if (!used_before)
			continue;

		/* skip entries of a different type */
		if (cur->type != type)
			continue;

		ret = __erofs_battach(cur, NULL, size, alignsize,
				      required_ext + inline_ext, true);
		if (ret < 0)
			continue;

		used = (ret + required_ext) % EROFS_BLKSIZ + inline_ext;

		/* should contain inline data in current block */
		if (used > EROFS_BLKSIZ)
			continue;

		/*
		 * remaining should be smaller than before or
		 * larger than allocating a new buffer block
		 */
		if (used < used_before && used < used0)
			continue;

		if (usedmax < used) {
			bb = cur;
			usedmax = used;
		}
	}
	*bbp = bb;
	return 0;
}

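/*
 * allocate a buffer head of `size' bytes, making sure that another
 * required_ext + inline_ext bytes can still follow it; an existing buffer
 * block is reused when possible, otherwise a fresh one is set up
 */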
struct erofs_buffer_head *erofs_balloc(int type, erofs_off_t size,
				       unsigned int required_ext,
				       unsigned int inline_ext)
{
	struct erofs_buffer_block *bb;
	struct erofs_buffer_head *bh;
	unsigned int alignsize;

	int ret = get_alignsize(type, &type);

	if (ret < 0)
		return ERR_PTR(ret);

	DBG_BUGON(type < 0 || type > META);
	alignsize = ret;

	/* try to find if we could reuse an allocated buffer block */
	ret = erofs_bfind_for_attach(type, size, required_ext, inline_ext,
				     alignsize, &bb);
	if (ret)
		return ERR_PTR(ret);

	if (bb) {
		bh = malloc(sizeof(struct erofs_buffer_head));
		if (!bh)
			return ERR_PTR(-ENOMEM);
	} else {
		/* get a new buffer block instead */
		bb = malloc(sizeof(struct erofs_buffer_block));
		if (!bb)
			return ERR_PTR(-ENOMEM);

		bb->type = type;
		bb->blkaddr = NULL_ADDR;
		bb->buffers.off = 0;
		init_list_head(&bb->buffers.list);
		list_add_tail(&bb->list, &blkh.list);
		init_list_head(&bb->mapped_list);

		bh = malloc(sizeof(struct erofs_buffer_head));
		if (!bh) {
			free(bb);
			return ERR_PTR(-ENOMEM);
		}
	}

	ret = __erofs_battach(bb, bh, size, alignsize,
			      required_ext + inline_ext, false);
	if (ret < 0) {
		/* bh was not attached on failure, don't leak it */
		free(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

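/* attach another buffer head of size bytes right after the tail bh of its buffer block */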
struct erofs_buffer_head *erofs_battach(struct erofs_buffer_head *bh,
					int type, unsigned int size)
{
	struct erofs_buffer_block *const bb = bh->block;
	struct erofs_buffer_head *nbh;
	unsigned int alignsize;
	int ret = get_alignsize(type, &type);

	if (ret < 0)
		return ERR_PTR(ret);
	alignsize = ret;

	/* should be the tail bh in the corresponding buffer block */
	if (bh->list.next != &bb->buffers.list)
		return ERR_PTR(-EINVAL);

	nbh = malloc(sizeof(*nbh));
	if (!nbh)
		return ERR_PTR(-ENOMEM);

	ret = __erofs_battach(bb, nbh, size, alignsize, 0, false);
	if (ret < 0) {
		free(nbh);
		return ERR_PTR(ret);
	}
	return nbh;
}

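/*
 * assign the current tail_blkaddr to a still-unmapped buffer block, then
 * return the first block address past it (raising tail_blkaddr if needed)
 */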
static erofs_blk_t __erofs_mapbh(struct erofs_buffer_block *bb)
{
	erofs_blk_t blkaddr;

	if (bb->blkaddr == NULL_ADDR) {
		bb->blkaddr = tail_blkaddr;
		last_mapped_block = bb;
		erofs_bupdate_mapped(bb);
	}

	blkaddr = bb->blkaddr + BLK_ROUND_UP(bb->buffers.off);
	if (blkaddr > tail_blkaddr)
		tail_blkaddr = blkaddr;

	return blkaddr;
}

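/*
 * map every buffer block from the last mapped one through `bb' (all
 * remaining blocks if bb is NULL); returns bb->blkaddr if bb was already
 * mapped, otherwise the updated tail_blkaddr
 */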
erofs_blk_t erofs_mapbh(struct erofs_buffer_block *bb)
{
	struct erofs_buffer_block *t = last_mapped_block;

	if (bb && bb->blkaddr != NULL_ADDR)
		return bb->blkaddr;
	do {
		t = list_next_entry(t, list);
		if (t == &blkh)
			break;

		DBG_BUGON(t->blkaddr != NULL_ADDR);
		(void)__erofs_mapbh(t);
	} while (t != bb);
	return tail_blkaddr;
}

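/*
 * flush all buffer blocks in front of `bb' (every block if bb is NULL):
 * write them out via their flush hooks, zero-pad the final partial block
 * and free each fully flushed buffer block
 */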
bool erofs_bflush(struct erofs_buffer_block *bb)
{
	struct erofs_buffer_block *p, *n;
	erofs_blk_t blkaddr;

	list_for_each_entry_safe(p, n, &blkh.list, list) {
		struct erofs_buffer_head *bh, *nbh;
		unsigned int padding;
		bool skip = false;

		if (p == bb)
			break;

		/* check whether the buffer block can be flushed */
		list_for_each_entry(bh, &p->buffers.list, list)
			if (bh->op->preflush && !bh->op->preflush(bh))
				return false;

		blkaddr = __erofs_mapbh(p);

		list_for_each_entry_safe(bh, nbh, &p->buffers.list, list) {
			/* flush and remove bh */
			if (!bh->op->flush(bh))
				skip = true;
		}

		if (skip)
			continue;

		padding = EROFS_BLKSIZ - p->buffers.off % EROFS_BLKSIZ;
		if (padding != EROFS_BLKSIZ)
			dev_fillzero(blknr_to_addr(blkaddr) - padding,
				     padding, true);

		DBG_BUGON(!list_empty(&p->buffers.list));

		erofs_dbg("block %u to %u flushed", p->blkaddr, blkaddr - 1);

		list_del(&p->mapped_list);
		list_del(&p->list);
		free(p);
	}
	return true;
}

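/*
 * drop a buffer head; if its buffer block becomes empty, free the block
 * too and roll tail_blkaddr back when `tryrevoke' allows it
 */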
void erofs_bdrop(struct erofs_buffer_head *bh, bool tryrevoke)
{
	struct erofs_buffer_block *const bb = bh->block;
	const erofs_blk_t blkaddr = bh->block->blkaddr;
	bool rollback = false;

	/* tail_blkaddr could be rolled back after revoking all bhs */
	if (tryrevoke && blkaddr != NULL_ADDR &&
	    tail_blkaddr == blkaddr + BLK_ROUND_UP(bb->buffers.off))
		rollback = true;

	bh->op = &erofs_drop_directly_bhops;
	erofs_bh_flush_generic_end(bh);

	if (!list_empty(&bb->buffers.list))
		return;

	if (bb == last_mapped_block)
		last_mapped_block = list_prev_entry(bb, list);

	list_del(&bb->mapped_list);
	list_del(&bb->list);
	free(bb);

	if (rollback)
		tail_blkaddr = blkaddr;
}