/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>

#include "zsmalloc.h"
#include "zsmalloc_int.h"

/*
 * A zspage's class index and fullness group
 * are encoded in its (first) page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
	return test_bit(PG_private, &page->flags);
}

static int is_last_page(struct page *page)
{
	return test_bit(PG_private_2, &page->flags);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}
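
/*
 * Worked example of the encoding above (illustrative values): with
 * class_idx = 5 and fullness group 3, set_zspage_mapping() stores
 * m = (5 << FULLNESS_BITS) | 3 = 0x53 in page->mapping, and
 * get_zspage_mapping() recovers fullness = 0x53 & 0xf = 3 and
 * class_idx = 0x53 >> 4 = 5. The mapping field is used purely as a
 * scratch word here; no real address_space is involved.
 */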

static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
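
/*
 * Example: assuming ZS_MIN_ALLOC_SIZE = 32 and ZS_SIZE_CLASS_DELTA = 16
 * (as defined in zsmalloc_int.h for 4K pages), a request for 100 bytes
 * maps to idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class of size
 * 32 + 5 * 16 = 112 (cf. the class setup in zs_create_pool() below).
 */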

static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
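
/*
 * Example, assuming fullness_threshold_frac == 4: a zspage that can hold
 * 8 objects is ZS_EMPTY at inuse == 0, ZS_ALMOST_EMPTY for inuse 1..2
 * (<= 8 / 4), ZS_ALMOST_FULL for inuse 3..7, and ZS_FULL at inuse == 8.
 */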

/*
 * Each size class keeps one list per fullness group. The list "head" is
 * the struct page pointer in class->fullness_list[]; member pages are
 * chained through their lru fields.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}

/*
 * Re-file a zspage on the fullness list matching its current usage and
 * update the <class, fullness> tag encoded in its first page.
 */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE, for some k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_zspage_order(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
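
/*
 * Tracing the example from the comment above (PAGE_SIZE = 4096,
 * class_size = 1536 = 3/8 * PAGE_SIZE, ZS_MAX_PAGES_PER_ZSPAGE >= 3):
 *	i = 1: waste =  4096 % 1536 = 1024, usedpc = 75
 *	i = 2: waste =  8192 % 1536 =  512, usedpc = 93
 *	i = 3: waste = 12288 % 1536 =    0, usedpc = 100
 * so get_zspage_order() returns 3 and the zspage holds exactly 8 objects.
 */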

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page->private;
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= (obj_idx & OBJ_INDEX_MASK);

	return (void *)handle;
}

/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(void *handle, struct page **page,
				unsigned long *obj_idx)
{
	unsigned long hval = (unsigned long)handle;

	*page = pfn_to_page(hval >> OBJ_INDEX_BITS);
	*obj_idx = hval & OBJ_INDEX_MASK;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
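
/*
 * Handle layout (bit widths come from OBJ_INDEX_BITS in zsmalloc_int.h):
 * the page frame number sits in the high bits and the object index in
 * the low OBJ_INDEX_BITS bits. E.g. for a page with pfn 0x1234 and
 * obj_idx 5, the handle is (0x1234 << OBJ_INDEX_BITS) | 5;
 * obj_handle_to_location() undoes this with a shift and a mask. The
 * object's byte offset is then page->index (offset of the first object
 * starting in a non-head page, 0 for the head page) plus
 * obj_idx * class_size.
 */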

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	reset_page_mapcount(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
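
/*
 * Freelist shape after init_zspage(), for example with class->size = 1536
 * and PAGE_SIZE = 4096 (a 3-page zspage, see get_zspage_order() above):
 * page 0 holds objects 0 and 1 at offsets 0 and 1536; object 2 starts at
 * 3072 and spills 512 bytes into page 1, so page1->index = 512. Each
 * struct link_free sits at the start of its (free) object and chains
 * (p0,0) -> (p0,1) -> (p0,2) -> (p1,0) -> ... until the last object,
 * whose link is obj_location_to_handle(NULL, 0) == NULL.
 */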

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->zspage_order; i++) {
		struct page *page, *prev_page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			set_bit(PG_private, &page->flags);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			first_page->private = (unsigned long)page;
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->zspage_order - 1)	/* last page */
			set_bit(PG_private_2, &page->flags);

		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->zspage_order * PAGE_SIZE / class->size;

	error = 0;	/* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
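
/*
 * Resulting layout for a zspage of order 3 (pages p0, p1, p2):
 * p0 has PG_private set and p0->private points to p1; p1 and p2 are
 * chained through their lru lists and point back to p0 via ->first_page;
 * p2 has PG_private_2 set to mark the end. get_next_page() walks
 * p0 -> p1 -> p2 -> NULL using exactly these links.
 */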

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		if (area->vm)
			break;
		area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
		if (!area->vm)
			return notifier_from_errno(-ENOMEM);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		if (area->vm)
			free_vm_area(area->vm);
		area->vm = NULL;
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			goto fail;
	}
	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}

struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	if (!name)
		return NULL;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;

		class = &pool->size_class[i];
		class->size = size;
		class->index = i;
		spin_lock_init(&class->lock);
		class->zspage_order = get_zspage_order(size);
	}

	pool->flags = flags;
	pool->name = name;

	return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = &pool->size_class[i];

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size "
					"%db, fullness group %d\n",
					class->size, fg);
			}
		}
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, returns an opaque handle encoding the <page, obj_idx>
 * location of the allocated object. On failure, returns NULL.
 *
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
void *zs_malloc(struct zs_pool *pool, size_t size)
{
	void *obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return NULL;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return NULL;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->zspage_order;
	}

	obj = first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	/* Pop the freelist head and poison the now-live link area */
	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, void *obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->zspage_order;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

void *zs_map_object(struct zs_pool *pool, void *handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
	} else {
		/* this object spans two pages */
		struct page *nextp;

		nextp = get_next_page(page);
		BUG_ON(!nextp);

		set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
		set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));

		/* We pre-allocated VM area so mapping can never fail */
		area->vm_addr = area->vm->addr;
	}

	return area->vm_addr + off;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, void *handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE) {
		kunmap_atomic(area->vm_addr);
	} else {
		set_pte(area->vm_ptes[0], __pte(0));
		set_pte(area->vm_ptes[1], __pte(0));
		__flush_tlb_one((unsigned long)area->vm_addr);
		__flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	int i;
	u64 npages = 0;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
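
/*
 * Typical call sequence (illustrative sketch only, compiled out; the pool
 * name and GFP flags below are arbitrary). Because zs_map_object() takes
 * a per-cpu mapping area via get_cpu_var(), the caller must not sleep
 * between zs_map_object() and zs_unmap_object().
 */
#if 0
static int zs_usage_example(void)
{
	struct zs_pool *pool;
	void *handle, *mem;
	char data[] = "hello, zsmalloc";

	pool = zs_create_pool("example", GFP_NOIO | __GFP_HIGHMEM);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, sizeof(data));
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* The object may span two pages; zs_map_object() hides that. */
	mem = zs_map_object(pool, handle);
	memcpy(mem, data, sizeof(data));
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
#endif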

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");