/*
 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
 * the previous. Hence an 'axmap', since we axe each previous layer into a
 * much smaller piece. I swear, that is why it's named like that. It has
 * nothing to do with anything remotely narcissistic.
 *
 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
 * the bitmap becomes progressively more full, checking for existence
 * becomes cheaper (since fewer layers are walked, making it a lot more
 * cache friendly), as does locating the next free space.
 *
 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
 * layers quickly diminish in size. Doing the size math is straightforward,
 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
 * towards 1.0158 bits per block, or 1.58% overhead.
 */
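/*
 * Worked example (editor's note, not part of the original comment): with
 * 64-bit words, 20000 blocks need ceil(20000 / 64) = 313 words at layer 0,
 * ceil(313 / 64) = 5 words at layer 1 and ceil(5 / 64) = 1 word at layer 2,
 * i.e. 319 words or 20416 bits in total. The 6 words of upper layers are
 * ~1.9% of the 313-word base layer, which is the figure quoted above.
 * Without rounding to whole words, the ratio tends towards
 * 1 + 1/64 + 1/64^2 + ... = 64/63 ~= 1.0159 bits per block.
 */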
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "../arch/arch.h"
#include "axmap.h"
#include "../smalloc.h"
#include "../minmax.h"

#if BITS_PER_LONG == 64
#define UNIT_SHIFT		6
#elif BITS_PER_LONG == 32
#define UNIT_SHIFT		5
#else
#error "Number of arch bits unknown"
#endif

#define BLOCKS_PER_UNIT		(1UL << UNIT_SHIFT)
#define BLOCKS_PER_UNIT_MASK	(BLOCKS_PER_UNIT - 1)

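/*
 * The first_free hint is valid unless it has been invalidated by setting
 * it to (uint64_t) -1.
 */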
#define firstfree_valid(b)	((b)->first_free != (uint64_t) -1)

struct axmap_level {
	int level;
	unsigned long map_size;		/* number of words in 'map' */
	unsigned long *map;
};

struct axmap {
	unsigned int nr_levels;
	struct axmap_level *levels;
	uint64_t first_free;
	uint64_t nr_bits;		/* total number of addressable bits */
};

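/*
 * Divide 'val' by BLOCKS_PER_UNIT 'log' times, i.e. turn a level 0 bit
 * number into the corresponding index at level 'log'.
 */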
static unsigned long ulog64(unsigned long val, unsigned int log)
{
	while (log-- && val)
		val >>= UNIT_SHIFT;

	return val;
}

void axmap_reset(struct axmap *axmap)
{
	int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		memset(al->map, 0, al->map_size * sizeof(unsigned long));
	}

	axmap->first_free = 0;
}

void axmap_free(struct axmap *axmap)
{
	unsigned int i;

	if (!axmap)
		return;

	for (i = 0; i < axmap->nr_levels; i++)
		sfree(axmap->levels[i].map);

	sfree(axmap->levels);
	sfree(axmap);
}

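/*
 * Allocate an axmap covering 'nr_bits' bits. Each level is BLOCKS_PER_UNIT
 * times smaller than the one below it; levels are added until a single
 * word covers the entire level beneath it.
 */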
struct axmap *axmap_new(unsigned long nr_bits)
{
	struct axmap *axmap;
	unsigned int i, levels;

	axmap = smalloc(sizeof(*axmap));
	if (!axmap)
		return NULL;

	levels = 1;
	i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	while (i > 1) {
		i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		levels++;
	}

	axmap->nr_levels = levels;
	axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level));
	if (!axmap->levels) {
		sfree(axmap);
		return NULL;
	}
	axmap->nr_bits = nr_bits;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		al->level = i;
		al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		al->map = smalloc(al->map_size * sizeof(unsigned long));
		if (!al->map)
			goto err;

		nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	}

	axmap_reset(axmap);
	return axmap;
err:
	/*
	 * Free the levels that were successfully allocated. The failing
	 * level has a NULL map, so stop there.
	 */
	for (i = 0; i < axmap->nr_levels; i++) {
		if (!axmap->levels[i].map)
			break;
		sfree(axmap->levels[i].map);
	}

	sfree(axmap->levels);
	sfree(axmap);
	return NULL;
}

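/*
 * Walk the levels from level 0 upwards, handing 'func' the word offset and
 * bit index of 'bit_nr' at each level. Stop as soon as 'func' returns
 * non-zero.
 */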
static int axmap_handler(struct axmap *axmap, uint64_t bit_nr,
			  int (*func)(struct axmap_level *, unsigned long, unsigned int,
			  void *), void *data)
{
	struct axmap_level *al;
	int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		unsigned long index = ulog64(bit_nr, i);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return 1;
	}

	return 0;
}

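/*
 * Like axmap_handler(), but walk the levels from the top down to level 0.
 */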
static int axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
	int (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
	void *data)
{
	struct axmap_level *al;
	int i;

	for (i = axmap->nr_levels - 1; i >= 0; i--) {
		unsigned long index = ulog64(bit_nr, i);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return 1;
	}

	return 0;
}

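/*
 * Clearing a bit at level 0 means its word is no longer full, so the
 * summary bit for that word at the next level up must be cleared too. The
 * clear propagates upwards until it hits a level where the bit was not
 * set to begin with.
 */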
static int axmap_clear_fn(struct axmap_level *al, unsigned long offset,
			   unsigned int bit, void *unused)
{
	if (!(al->map[offset] & (1UL << bit)))
		return 1;

	al->map[offset] &= ~(1UL << bit);
	return 0;
}

void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
{
	axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);
}

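/*
 * nr_bits: number of contiguous bits the caller wants set.
 * set_bits: number of bits that were actually newly set at level 0.
 */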
struct axmap_set_data {
	unsigned int nr_bits;
	unsigned int set_bits;
};

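/*
 * bit_masks[i] has the low i bits set.
 */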
static unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};

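/*
 * Set up to data->nr_bits contiguous bits in this word, starting at 'bit'.
 * Bits that are already set are masked off and not counted. At level 0 the
 * number of newly set bits is reported back in data->set_bits; the levels
 * above only need their single summary bit (data->nr_bits is reset to 1).
 * Returning zero, i.e. the word is now full, makes the handler continue
 * with the next level up.
 */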
static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
			 unsigned int bit, void *__data)
{
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, only sets contig regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask)
		return 1;

	while (overlap) {
		unsigned long clear_mask = ~(1UL << ffz(~overlap));

		mask &= clear_mask;
		overlap &= clear_mask;
		nr_bits--;
	}

	assert(mask);
	assert(!(al->map[offset] & mask));

	al->map[offset] |= mask;

	if (!al->level)
		data->set_bits = nr_bits;

	data->nr_bits = 1;
	return al->map[offset] != -1UL;
}

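/*
 * Set data->nr_bits bits starting at 'bit_nr', clamped to the size of the
 * map. If the cached first-free hint falls inside the range being set, it
 * is invalidated.
 */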
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
			 struct axmap_set_data *data)
{
	unsigned int set_bits, nr_bits = data->nr_bits;

	if (axmap->first_free >= bit_nr &&
	    axmap->first_free < bit_nr + data->nr_bits)
		axmap->first_free = -1ULL;

	if (bit_nr > axmap->nr_bits)
		return;
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

	set_bits = 0;
	while (nr_bits) {
		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
			break;

		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;
	}

	data->set_bits = set_bits;
}

void axmap_set(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_set_data data = { .nr_bits = 1, };

	__axmap_set(axmap, bit_nr, &data);
}

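/*
 * Set 'nr_bits' bits starting at 'bit_nr', handling at most one word per
 * iteration so that a single __axmap_set() never crosses a word boundary.
 * Returns the number of bits that were newly set.
 */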
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits)
{
	unsigned int set_bits = 0;

	do {
		struct axmap_set_data data = { .nr_bits = nr_bits, };
		unsigned int max_bits, this_set;

		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		if (max_bits < nr_bits)
			data.nr_bits = max_bits;

		this_set = data.nr_bits;
		__axmap_set(axmap, bit_nr, &data);
		set_bits += data.set_bits;
		if (data.set_bits != this_set)
			break;

		nr_bits -= data.set_bits;
		bit_nr += data.set_bits;
	} while (nr_bits);

	return set_bits;
}

static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
			    unsigned int bit, void *unused)
{
	return (al->map[offset] & (1UL << bit)) != 0;
}

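/*
 * A set bit at level N means the whole word it summarizes at level N - 1
 * is full, so a top-down walk can stop at the first level that has the
 * bit set.
 */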
int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
{
	/* valid bit numbers are 0..nr_bits - 1 */
	if (bit_nr < axmap->nr_bits)
		return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);

	return 0;
}

static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				       uint64_t index)
{
	uint64_t ret = -1ULL;
	unsigned long j;
	int i;

	/*
	 * Start at the given level and walk towards level 0. The first free
	 * bit found at level i is the word index to continue the search
	 * from at level i - 1.
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];

		/*
		 * An index beyond this level's map indicates a bug; clear
		 * 'ret' and stop.
		 */
		if (index >= al->map_size) {
			ret = -1ULL;
			break;
		}

		for (j = index; j < al->map_size; j++) {
			if (al->map[j] == -1UL)
				continue;

			/*
			 * The first free bit here is our index into the map
			 * at the next level down.
			 */
			ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;
		}
	}

	if (ret < axmap->nr_bits)
		return ret;

	return (uint64_t) -1ULL;
}

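/*
 * Return the lowest free bit number, using the cached hint if it is still
 * valid.
 */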
uint64_t axmap_first_free(struct axmap *axmap)
{
	if (firstfree_valid(axmap))
		return axmap->first_free;

	axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);
	return axmap->first_free;
}

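/*
 * 'bit' is the bit number to search above; 'level' and 'offset' record the
 * word in which axmap_next_free_fn() found free bits.
 */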
struct axmap_next_free_data {
	unsigned int level;
	unsigned long offset;
	uint64_t bit;
};

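/*
 * Check whether this word has any free bits above data->bit. If it does
 * not, let the handler move up a level; otherwise record the level and
 * offset so the caller can resume the free-bit search there.
 */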
static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
			       unsigned int bit, void *__data)
{
	struct axmap_next_free_data *data = __data;
	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];

	if (!(mask & ~al->map[offset]))
		return 0;

	if (al->map[offset] != -1UL) {
		data->level = al->level;
		data->offset = offset;
		return 1;
	}

	data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
	return 0;
}

/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
	uint64_t ret;

	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
		return axmap->first_free;

	if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data))
		return axmap_first_free(axmap);

	assert(data.level != -1U);

	/*
	 * In the rare case that the map is unaligned, we might end up
	 * finding an offset that's beyond the valid end. In that case,
	 * fall back to the first free bit of the whole map; the map is
	 * practically full anyway.
	 */
	ret = axmap_find_first_free(axmap, data.level, data.offset);
	if (ret != -1ULL)
		return ret;

	return axmap_first_free(axmap);
}

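/*
 * Usage sketch (editor's illustration, not part of the original file), kept
 * under '#if 0' so the translation unit itself is unchanged. It assumes the
 * shared allocator behind smalloc()/sfree() has already been initialized,
 * as fio does during startup.
 */
#if 0
static void axmap_example(void)
{
	struct axmap *map = axmap_new(20000);

	if (!map)
		return;

	axmap_set(map, 0);			/* mark block 0 used */
	axmap_set_nr(map, 1, 63);		/* mark blocks 1..63 used */

	assert(axmap_isset(map, 42));
	assert(!axmap_isset(map, 64));
	assert(axmap_next_free(map, 0) == 64);	/* word 0 of level 0 is full */

	axmap_free(map);
}
#endif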