/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */
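
/*
 * A minimal sketch of the size-class routing described above, assuming
 * the SLOB_BREAK1/SLOB_BREAK2 values defined below and a 4096-byte page:
 *
 *	slob_alloc(100, ...)  -> size < 256  -> free_slob_small
 *	slob_alloc(512, ...)  -> size < 1024 -> free_slob_medium
 *	slob_alloc(2048, ...) -> otherwise   -> free_slob_large
 *	kmalloc(4096)         -> PAGE_SIZE or larger, bypasses the slob
 *	                         lists and calls alloc_pages() directly
 */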

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates the size of the block
 * if positive, or the offset of the next block if negative (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
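
/*
 * A worked example of the encoding above, assuming a 16-bit slobidx_t
 * so that SLOB_UNIT == sizeof(slob_t) == 2 bytes. A free block of
 * 3 units whose successor lives at unit offset 40 within the page is
 * stored as:
 *
 *	s[0].units =  3;	(size, positive)
 *	s[1].units = 40;	(unit offset of the next free block)
 *
 * A 1-unit free block has no room for both fields, so the offset is
 * stored negated in the single available unit:
 *
 *	s[0].units = -40;	(next at offset 40, size implied 1)
 */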

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page: true for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return PageSlobPage((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlobPage((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlobPage((struct page *)sp);
}

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
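
/*
 * Worked arithmetic for SLOB_UNITS(), assuming SLOB_UNIT == 2 (16-bit
 * slobidx_t): it rounds a byte count up to a whole number of units, so
 *
 *	SLOB_UNITS(1)         == (1 + 2 - 1) / 2   == 1
 *	SLOB_UNITS(100)       == (100 + 2 - 1) / 2 == 50
 *	SLOB_UNITS(PAGE_SIZE) == 2048              (4096-byte page)
 */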

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
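
/*
 * A minimal sketch of the offset encoding used by set_slob()/slob_next()
 * above, assuming a page at address 0x1000 and SLOB_UNIT == 2:
 *
 *	s = (slob_t *)0x1010;	with s[0].units == -40
 *	base == (slob_t *)0x1000, next == 40
 *	slob_next(s) == base + 40 == (slob_t *)0x1050
 *
 * A next offset of 0 points back at the page base, which is why
 * slob_last() treats a page-aligned successor as the end of the list.
 */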

static void *slob_new_page(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
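
/*
 * A worked example of the alignment path in slob_page_alloc(), under
 * assumed values: cur at byte offset 4 in its page, avail == 30 units,
 * units == 8, align == 16, SLOB_UNIT == 2. ALIGN() rounds cur up to
 * byte offset 16, so delta == (16 - 4) / 2 == 6 units. Since
 * 30 >= 8 + 6 there is room: the head is fragmented into a 6-unit
 * free block at cur, cur moves to the aligned 24-unit block, and the
 * 8-unit allocation is then carved from its front, leaving a 16-unit
 * remainder on the free list.
 */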

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp, &free_slob_small);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
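
/*
 * A minimal sketch of the coalescing performed above: freeing b when
 * both neighbours are already free merges all three into one block,
 * keeping the list address-ordered:
 *
 *	before:	[ prev free ][ b in use ][ next free ]
 *	after:	[            one free block          ]
 *
 * The low-side merge (prev + slob_units(prev) == b) and the high-side
 * merge (b + units == next) are tested independently, so one-sided
 * merges work as well.
 */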

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);
		if (!m)
			return NULL;
		*m = size;
		return (void *)m + align;
	} else {
		void *ret;

		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}
		return ret;
	}
}
EXPORT_SYMBOL(__kmalloc_node);
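
/*
 * A minimal sketch of the small-object layout produced above, assuming
 * align == 8: the size word sits in front of the object and the caller
 * sees the address just past the header.
 *
 *	m                 m + align
 *	|                 |
 *	[ size | padding ][ caller's object ... ]
 *
 * kfree() and ksize() step back by the same align to recover the size.
 */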

void kfree(const void *block)
{
	struct slob_page *sp;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align, node);
	else
		b = slob_new_page(flags, get_order(c->size), node);

	if (c->ctor)
		c->ctor(b);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);
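
/*
 * A minimal sketch of the SLAB_DESTROY_BY_RCU layout used above:
 * kmem_cache_create() grew c->size by sizeof(struct slob_rcu), so the
 * footer overlays the tail of the allocation without extra bookkeeping:
 *
 *	b                          b + c->size
 *	|                          |
 *	[ object ...  [ struct slob_rcu ] ]
 *
 * kmem_rcu_free() reverses the arithmetic to recover b before handing
 * the whole block to __kmem_cache_free().
 */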

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}