Lines Matching full:class
142 * determined). NOTE: all those class sizes must be set as multiples of
203 * Size of objects stored in this class. Must be a multiple
281 unsigned int class:CLASS_BITS + 1; member
472 /* Protected by class->lock */
523 *class_idx = zspage->class; in get_zspage_mapping()
530 zspage->class = class_idx; in set_zspage_mapping()
536 * class maintains a list of zspages where each zspage is divided
539 * size class which has a chunk size big enough to hold the given size.
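
The matched comment above describes the size-class lookup: an allocation is served from the smallest class whose chunk size can hold it. Below is a minimal userspace sketch of that rounding step; the minimum allocation size, the step between classes, and the class count are illustrative assumptions, not values taken from this listing.

#include <stdio.h>

/* Illustrative constants; the real values are defined in mm/zsmalloc.c. */
#define MIN_ALLOC_SIZE   32   /* smallest chunk size served (assumed) */
#define CLASS_DELTA      16   /* size step between adjacent classes (assumed) */
#define NR_SIZE_CLASSES  255  /* number of size classes (assumed) */

/* Map an allocation size to the index of the smallest class that fits it. */
static int size_to_class_index(int size)
{
	int idx = 0;

	if (size > MIN_ALLOC_SIZE)
		idx = (size - MIN_ALLOC_SIZE + CLASS_DELTA - 1) / CLASS_DELTA;

	return idx < NR_SIZE_CLASSES - 1 ? idx : NR_SIZE_CLASSES - 1;
}

int main(void)
{
	int idx = size_to_class_index(100);

	/* A 100-byte object lands in the class with 112-byte chunks. */
	printf("class index %d, chunk size %d\n",
	       idx, MIN_ALLOC_SIZE + idx * CLASS_DELTA);
	return 0;
}
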
553 static inline void zs_stat_inc(struct size_class *class, in zs_stat_inc() argument
556 class->stats.objs[type] += cnt; in zs_stat_inc()
560 static inline void zs_stat_dec(struct size_class *class, in zs_stat_dec() argument
563 class->stats.objs[type] -= cnt; in zs_stat_dec()
567 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
570 return class->stats.objs[type]; in zs_stat_get()
592 static unsigned long zs_can_compact(struct size_class *class);
598 struct size_class *class; in zs_stats_size_show() local
607 "class", "size", "almost_full", "almost_empty", in zs_stats_size_show()
612 class = pool->size_class[i]; in zs_stats_size_show()
614 if (class->index != i) in zs_stats_size_show()
617 spin_lock(&class->lock); in zs_stats_size_show()
618 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); in zs_stats_size_show()
619 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); in zs_stats_size_show()
620 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_stats_size_show()
621 obj_used = zs_stat_get(class, OBJ_USED); in zs_stats_size_show()
622 freeable = zs_can_compact(class); in zs_stats_size_show()
623 spin_unlock(&class->lock); in zs_stats_size_show()
625 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
627 class->pages_per_zspage; in zs_stats_size_show()
631 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
633 class->pages_per_zspage, freeable); in zs_stats_size_show()
705 * For each size class, zspages are divided into different groups
711 static enum fullness_group get_fullness_group(struct size_class *class, in get_fullness_group() argument
718 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
733 * Each size class maintains various freelists and zspages are assigned
736 * identified by <class, fullness_group>.
738 static void insert_zspage(struct size_class *class, in insert_zspage() argument
744 zs_stat_inc(class, fullness, 1); in insert_zspage()
745 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
757 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
762 * by <class, fullness_group>.
764 static void remove_zspage(struct size_class *class, in remove_zspage() argument
768 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
772 zs_stat_dec(class, fullness, 1); in remove_zspage()
776 * Each size class maintains zspages in different fullness groups depending
784 static enum fullness_group fix_fullness_group(struct size_class *class, in fix_fullness_group() argument
791 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
796 remove_zspage(class, zspage, currfg); in fix_fullness_group()
797 insert_zspage(class, zspage, newfg); in fix_fullness_group()
808 * to form a zspage for each size class. This is important
815 * For example, for a size class of 3/8 * PAGE_SIZE, we should
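
The comment above hints at why a zspage may chain several physical pages: for chunks of 3/8 * PAGE_SIZE, a single page holds two chunks and wastes a quarter of the page, while a three-page zspage holds eight chunks with no waste. The following self-contained sketch shows that waste comparison; the cap on pages per zspage is an assumed value for illustration.

#include <stdio.h>

#define PAGE_SIZE		4096
#define MAX_PAGES_PER_ZSPAGE	4	/* assumed cap on zspage size */

/* Pick the page count per zspage that leaves the least unusable space
 * for a given chunk size. */
static int best_pages_per_zspage(int class_size)
{
	int order, best_order = 1, best_used_pct = 0;

	for (order = 1; order <= MAX_PAGES_PER_ZSPAGE; order++) {
		int zspage_size = order * PAGE_SIZE;
		int waste = zspage_size % class_size;
		int used_pct = (zspage_size - waste) * 100 / zspage_size;

		if (used_pct > best_used_pct) {
			best_used_pct = used_pct;
			best_order = order;
		}
	}
	return best_order;
}

int main(void)
{
	/* 3/8 * PAGE_SIZE = 1536 bytes: one page wastes 1024 bytes,
	 * three pages tile exactly, so the answer is 3. */
	printf("pages per zspage: %d\n", best_pages_per_zspage(3 * PAGE_SIZE / 8));
	return 0;
}
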
953 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
962 assert_spin_locked(&class->lock); in __free_zspage()
980 zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
981 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
985 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
996 remove_zspage(class, zspage, ZS_EMPTY); in free_zspage()
997 __free_zspage(pool, class, zspage); in free_zspage()
1001 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
1017 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
1019 link += class->size / sizeof(*link); in init_zspage()
1045 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
1051 int nr_pages = class->pages_per_zspage; in create_page_chain()
1068 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1069 class->pages_per_zspage == 1)) in create_page_chain()
1079 * Allocate a zspage for the given size class
1082 struct size_class *class, in alloc_zspage() argument
1096 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1113 create_page_chain(class, zspage, pages); in alloc_zspage()
1114 init_zspage(class, zspage); in alloc_zspage()
1119 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1125 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1283 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1285 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1319 struct size_class *class; in zs_map_object() local
1342 class = pool->size_class[class_idx]; in zs_map_object()
1343 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1347 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1359 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1377 struct size_class *class; in zs_unmap_object() local
1384 class = pool->size_class[class_idx]; in zs_unmap_object()
1385 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1388 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1397 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
1411 * The function returns the size of the first huge class - any object of equal
1425 static unsigned long obj_malloc(struct size_class *class, in obj_malloc() argument
1439 offset = obj * class->size; in obj_malloc()
1459 zs_stat_inc(class, OBJ_USED, 1); in obj_malloc()
1480 struct size_class *class; in zs_malloc() local
1493 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1495 spin_lock(&class->lock); in zs_malloc()
1496 zspage = find_get_zspage(class); in zs_malloc()
1498 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1500 fix_fullness_group(class, zspage); in zs_malloc()
1502 spin_unlock(&class->lock); in zs_malloc()
1507 spin_unlock(&class->lock); in zs_malloc()
1509 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1515 spin_lock(&class->lock); in zs_malloc()
1516 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1517 newfg = get_fullness_group(class, zspage); in zs_malloc()
1518 insert_zspage(class, zspage, newfg); in zs_malloc()
1519 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1521 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1523 zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1527 spin_unlock(&class->lock); in zs_malloc()
1533 static void obj_free(struct size_class *class, unsigned long obj) in obj_free() argument
1544 f_offset = (class->size * f_objidx) & ~PAGE_MASK; in obj_free()
1555 zs_stat_dec(class, OBJ_USED, 1); in obj_free()
1565 struct size_class *class; in zs_free() local
1580 class = pool->size_class[class_idx]; in zs_free()
1582 spin_lock(&class->lock); in zs_free()
1583 obj_free(class, obj); in zs_free()
1584 fullness = fix_fullness_group(class, zspage); in zs_free()
1594 free_zspage(pool, class, zspage); in zs_free()
1597 spin_unlock(&class->lock); in zs_free()
1603 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1613 s_size = d_size = class->size; in zs_object_copy()
1618 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1619 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1621 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1624 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1635 if (written == class->size) in zs_object_copy()
1649 s_size = class->size - written; in zs_object_copy()
1657 d_size = class->size - written; in zs_object_copy()
1670 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1680 offset += class->size * index; in find_alloced_obj()
1691 offset += class->size; in find_alloced_obj()
1713 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, in migrate_zspage() argument
1724 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1734 if (zspage_full(class, get_zspage(d_page))) { in migrate_zspage()
1741 free_obj = obj_malloc(class, get_zspage(d_page), handle); in migrate_zspage()
1742 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1753 obj_free(class, used_obj); in migrate_zspage()
1763 static struct zspage *isolate_zspage(struct size_class *class, bool source) in isolate_zspage() argument
1775 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1779 remove_zspage(class, zspage, fg[i]); in isolate_zspage()
1788 * putback_zspage - add @zspage into the right class's fullness list
1789 * @class: destination class
1794 static enum fullness_group putback_zspage(struct size_class *class, in putback_zspage() argument
1801 fullness = get_fullness_group(class, zspage); in putback_zspage()
1802 insert_zspage(class, zspage, fullness); in putback_zspage()
1803 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1891 struct size_class *class, in putback_zspage_deferred() argument
1896 fg = putback_zspage(class, zspage); in putback_zspage_deferred()
1908 * checks the isolated count under &class->lock after enqueuing in zs_pool_dec_isolated()
1915 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1931 create_page_chain(class, zspage, pages); in replace_sub_page()
1941 struct size_class *class; in zs_page_isolate() local
1957 * Without class lock, fullness could be stale while class_idx is okay in zs_page_isolate()
1959 * fullness again under class lock. in zs_page_isolate()
1964 class = pool->size_class[class_idx]; in zs_page_isolate()
1966 spin_lock(&class->lock); in zs_page_isolate()
1968 spin_unlock(&class->lock); in zs_page_isolate()
1974 spin_unlock(&class->lock); in zs_page_isolate()
1985 remove_zspage(class, zspage, fullness); in zs_page_isolate()
1989 spin_unlock(&class->lock); in zs_page_isolate()
1998 struct size_class *class; in zs_page_migrate() local
2027 class = pool->size_class[class_idx]; in zs_page_migrate()
2030 spin_lock(&class->lock); in zs_page_migrate()
2048 pos += class->size; in zs_page_migrate()
2059 addr += class->size) { in zs_page_migrate()
2075 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
2091 putback_zspage_deferred(pool, class, zspage); in zs_page_migrate()
2107 addr += class->size) { in zs_page_migrate()
2117 spin_unlock(&class->lock); in zs_page_migrate()
2126 struct size_class *class; in zs_page_putback() local
2139 class = pool->size_class[class_idx]; in zs_page_putback()
2141 spin_lock(&class->lock); in zs_page_putback()
2148 putback_zspage_deferred(pool, class, zspage); in zs_page_putback()
2151 spin_unlock(&class->lock); in zs_page_putback()
2214 struct size_class *class; in async_free_zspage() local
2223 class = pool->size_class[i]; in async_free_zspage()
2224 if (class->index != i) in async_free_zspage()
2227 spin_lock(&class->lock); in async_free_zspage()
2228 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
2229 spin_unlock(&class->lock); in async_free_zspage()
2239 class = pool->size_class[class_idx]; in async_free_zspage()
2240 spin_lock(&class->lock); in async_free_zspage()
2242 spin_unlock(&class->lock); in async_free_zspage()
2273 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
2276 unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_can_compact()
2277 unsigned long obj_used = zs_stat_get(class, OBJ_USED); in zs_can_compact()
2283 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2285 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
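
The lines above give the whole of the compaction estimate for a class: the allocated-but-unused object slots, rounded down to whole zspages, times the pages backing each zspage. A standalone version of that arithmetic with made-up numbers:

#include <stdio.h>

/* Pages compaction could free in one size class: allocated-but-unused
 * objects expressed in whole zspages, times pages per zspage. */
static unsigned long can_compact(unsigned long obj_allocated,
				 unsigned long obj_used,
				 unsigned long objs_per_zspage,
				 unsigned long pages_per_zspage)
{
	unsigned long obj_wasted;

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= objs_per_zspage;

	return obj_wasted * pages_per_zspage;
}

int main(void)
{
	/* 128 slots allocated, 70 used, 16 objects and 2 pages per zspage:
	 * 58 unused slots = 3 whole zspages = 6 freeable pages. */
	printf("%lu pages freeable\n", can_compact(128, 70, 16, 2));
	return 0;
}
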
2288 static void __zs_compact(struct zs_pool *pool, struct size_class *class) in __zs_compact() argument
2294 spin_lock(&class->lock); in __zs_compact()
2295 while ((src_zspage = isolate_zspage(class, true))) { in __zs_compact()
2297 if (!zs_can_compact(class)) in __zs_compact()
2303 while ((dst_zspage = isolate_zspage(class, false))) { in __zs_compact()
2309 if (!migrate_zspage(pool, class, &cc)) in __zs_compact()
2312 putback_zspage(class, dst_zspage); in __zs_compact()
2319 putback_zspage(class, dst_zspage); in __zs_compact()
2320 if (putback_zspage(class, src_zspage) == ZS_EMPTY) { in __zs_compact()
2321 free_zspage(pool, class, src_zspage); in __zs_compact()
2322 pool->stats.pages_compacted += class->pages_per_zspage; in __zs_compact()
2324 spin_unlock(&class->lock); in __zs_compact()
2326 spin_lock(&class->lock); in __zs_compact()
2330 putback_zspage(class, src_zspage); in __zs_compact()
2332 spin_unlock(&class->lock); in __zs_compact()
2338 struct size_class *class; in zs_compact() local
2341 class = pool->size_class[i]; in zs_compact()
2342 if (!class) in zs_compact()
2344 if (class->index != i) in zs_compact()
2346 __zs_compact(pool, class); in zs_compact()
2381 struct size_class *class; in zs_shrinker_count() local
2387 class = pool->size_class[i]; in zs_shrinker_count()
2388 if (!class) in zs_shrinker_count()
2390 if (class->index != i) in zs_shrinker_count()
2393 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2455 struct size_class *class; in zs_create_pool() local
2467 * class. Any object bigger than or equal to that will in zs_create_pool()
2468 * end up in the huge class. in zs_create_pool()
2477 * size class search - so an object may be smaller than in zs_create_pool()
2478 * huge class size, yet it still can end up in the huge in zs_create_pool()
2479 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2480 * right before class lookup. in zs_create_pool()
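
The comments above make two points about the huge class: any object at least as large as the first huge-class size gets a zspage consisting of a single physical page to itself, and an object slightly below that size can still land there because ZS_HANDLE_SIZE is added right before the class lookup. A toy check of that threshold follows; both constants here are assumed values, not taken from this listing.

#include <stdbool.h>
#include <stdio.h>

#define HANDLE_SIZE	8	/* per-object handle overhead (assumed) */
#define HUGE_CLASS_SIZE	3264	/* size of the first huge class (assumed) */

/* An object is stored "huge" (a single page all to itself) once its
 * size plus the handle overhead reaches the first huge class. */
static bool lands_in_huge_class(unsigned int size)
{
	return size + HANDLE_SIZE >= HUGE_CLASS_SIZE;
}

int main(void)
{
	/* 3260 bytes is below HUGE_CLASS_SIZE, but the handle added right
	 * before class lookup pushes it over the threshold. */
	printf("%d\n", lands_in_huge_class(3260));
	return 0;
}
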
2501 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2502 if (!class) in zs_create_pool()
2505 class->size = size; in zs_create_pool()
2506 class->index = i; in zs_create_pool()
2507 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2508 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2509 spin_lock_init(&class->lock); in zs_create_pool()
2510 pool->size_class[i] = class; in zs_create_pool()
2513 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2515 prev_class = class; in zs_create_pool()
2550 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2552 if (!class) in zs_destroy_pool()
2555 if (class->index != i) in zs_destroy_pool()
2559 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2560 pr_info("Freeing non-empty class with size %db, fullness group %d\n", in zs_destroy_pool()
2561 class->size, fg); in zs_destroy_pool()
2564 kfree(class); in zs_destroy_pool()