Lines Matching refs:class (cross-reference of the symbol class in the Linux zsmalloc allocator, mm/zsmalloc.c; each entry gives the source line number, the matching code, and the enclosing definition)
282 unsigned int class:CLASS_BITS + 1; member
517 *class_idx = zspage->class; in get_zspage_mapping()
524 zspage->class = class_idx; in set_zspage_mapping()
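
struct zspage stores its size-class index and fullness group in bitfields, and lines 517/524 above show the accessors that read and write that mapping. A minimal userspace sketch of the scheme; the bit widths are assumed from the kernel's CLASS_BITS/FULLNESS_BITS defines, and the struct is a reduced stand-in, not the real struct zspage:

#define CLASS_BITS	8	/* assumed, from zsmalloc.c */
#define FULLNESS_BITS	2	/* assumed, from zsmalloc.c */

/* reduced stand-in for the kernel's struct zspage */
struct zspage {
	unsigned int class:CLASS_BITS + 1;
	unsigned int fullness:FULLNESS_BITS;
};

static void get_zspage_mapping(struct zspage *zspage,
			       unsigned int *class_idx,
			       unsigned int *fullness)
{
	*class_idx = zspage->class;
	*fullness = zspage->fullness;
}

static void set_zspage_mapping(struct zspage *zspage,
			       unsigned int class_idx,
			       unsigned int fullness)
{
	zspage->class = class_idx;
	zspage->fullness = fullness;
}

Packing both fields into one word keeps struct zspage small: every zspage carries enough state to find its size_class again without a back-pointer.
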
547 static inline void zs_stat_inc(struct size_class *class, in zs_stat_inc() argument
550 class->stats.objs[type] += cnt; in zs_stat_inc()
554 static inline void zs_stat_dec(struct size_class *class, in zs_stat_dec() argument
557 class->stats.objs[type] -= cnt; in zs_stat_dec()
561 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
564 return class->stats.objs[type]; in zs_stat_get()
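
The three stat helpers above are thin wrappers over a per-class counter array indexed by stat type. A compilable sketch, with the enum trimmed to the counters that actually appear in this listing:

enum zs_stat_type {
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
	OBJ_ALLOCATED,	/* slots carved out of zspages */
	OBJ_USED,	/* slots handed to callers */
	NR_ZS_STAT_TYPE,
};

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

struct size_class {
	struct zs_size_stat stats;
};

static inline void zs_stat_inc(struct size_class *class,
			       int type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
			       int type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class, int type)
{
	return class->stats.objs[type];
}

In the kernel the leading entries of this enum line up with enum fullness_group, which is why insert_zspage()/remove_zspage() below can pass a fullness value straight through as a stat index (lines 722/750). The gap between OBJ_ALLOCATED and OBJ_USED is what zs_can_compact() later turns into a reclaim estimate.
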
584 static unsigned long zs_can_compact(struct size_class *class);
590 struct size_class *class; in zs_stats_size_show() local
604 class = pool->size_class[i]; in zs_stats_size_show()
606 if (class->index != i) in zs_stats_size_show()
609 spin_lock(&class->lock); in zs_stats_size_show()
610 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); in zs_stats_size_show()
611 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); in zs_stats_size_show()
612 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_stats_size_show()
613 obj_used = zs_stat_get(class, OBJ_USED); in zs_stats_size_show()
614 freeable = zs_can_compact(class); in zs_stats_size_show()
615 spin_unlock(&class->lock); in zs_stats_size_show()
617 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
619 class->pages_per_zspage; in zs_stats_size_show()
623 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
625 class->pages_per_zspage, freeable); in zs_stats_size_show()
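
zs_stats_size_show() prints the per-class counters and derives the page footprint from them rather than tracking it separately. The arithmetic as a standalone sketch; the division is exact because zspages are created and freed whole, so obj_allocated is always a multiple of objs_per_zspage:

/* e.g. 408 allocated slots, 51 objs per zspage, 4 pages per zspage:
 * 408 / 51 = 8 zspages -> 8 * 4 = 32 pages used
 */
static unsigned long pages_used(unsigned long obj_allocated,
				int objs_per_zspage, int pages_per_zspage)
{
	return obj_allocated / objs_per_zspage * pages_per_zspage;
}
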
689 static enum fullness_group get_fullness_group(struct size_class *class, in get_fullness_group() argument
696 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
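
get_fullness_group() classifies a zspage purely from its in-use object count versus class->objs_per_zspage. A sketch of the thresholds; the 3/4 boundary reflects the kernel's fullness_threshold_frac of 4, an assumption worth confirming against your tree:

#include <stdio.h>

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

/* assumed: fullness_threshold_frac == 4, i.e. the almost-empty /
 * almost-full boundary sits at 3/4 occupancy */
static enum fullness_group get_fullness(int inuse, int objs_per_zspage)
{
	if (inuse == 0)
		return ZS_EMPTY;
	if (inuse == objs_per_zspage)
		return ZS_FULL;
	if (inuse <= 3 * objs_per_zspage / 4)
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}

int main(void)
{
	/* with 12 objects per zspage: 0 -> EMPTY, 1..9 -> ALMOST_EMPTY,
	 * 10..11 -> ALMOST_FULL, 12 -> FULL */
	for (int inuse = 0; inuse <= 12; inuse++)
		printf("inuse %2d -> group %d\n", inuse,
		       get_fullness(inuse, 12));
	return 0;
}
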
716 static void insert_zspage(struct size_class *class, in insert_zspage() argument
722 zs_stat_inc(class, fullness, 1); in insert_zspage()
723 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
735 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
742 static void remove_zspage(struct size_class *class, in remove_zspage() argument
746 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
750 zs_stat_dec(class, fullness, 1); in remove_zspage()
762 static enum fullness_group fix_fullness_group(struct size_class *class, in fix_fullness_group() argument
769 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
774 remove_zspage(class, zspage, currfg); in fix_fullness_group()
775 insert_zspage(class, zspage, newfg); in fix_fullness_group()
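
fix_fullness_group() is the regrouping step run after every allocation and free: recompute the group and move the zspage between the per-class lists only when it actually changed. A userspace model with the list splicing stubbed out (the kernel moves the zspage between class->fullness_list[] heads here and also refreshes the stored mapping):

#include <stdio.h>

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

struct size_class { int objs_per_zspage; };
struct zspage { enum fullness_group fullness; int inuse; };

/* stubs for the list splicing done by the real helpers */
static void remove_zspage(struct size_class *class, struct zspage *zspage,
			  enum fullness_group fg)
{
	printf("unlink from fullness_list[%d]\n", fg);
}

static void insert_zspage(struct size_class *class, struct zspage *zspage,
			  enum fullness_group fg)
{
	printf("link into fullness_list[%d]\n", fg);
}

static enum fullness_group get_fullness_group(struct size_class *class,
					      struct zspage *zspage)
{
	if (zspage->inuse == 0)
		return ZS_EMPTY;
	if (zspage->inuse == class->objs_per_zspage)
		return ZS_FULL;
	return zspage->inuse <= 3 * class->objs_per_zspage / 4 ?
		ZS_ALMOST_EMPTY : ZS_ALMOST_FULL;
}

/* only touch the lists when the group really changed */
static enum fullness_group fix_fullness_group(struct size_class *class,
					      struct zspage *zspage)
{
	enum fullness_group newfg = get_fullness_group(class, zspage);

	if (newfg == zspage->fullness)
		return newfg;

	remove_zspage(class, zspage, zspage->fullness);
	insert_zspage(class, zspage, newfg);
	zspage->fullness = newfg;	/* set_zspage_mapping() */
	return newfg;
}
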
931 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
940 assert_spin_locked(&class->lock); in __free_zspage()
958 zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
959 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
963 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
974 remove_zspage(class, zspage, ZS_EMPTY); in free_zspage()
975 __free_zspage(pool, class, zspage); in free_zspage()
979 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
995 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
997 link += class->size / sizeof(*link); in init_zspage()
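
init_zspage() threads a freelist through the storage itself: the first word of every free slot (struct link_free) holds the encoded index of the next free slot, and the loop at lines 995/997 advances in class->size strides. A single-page sketch, assuming OBJ_TAG_BITS of 1 for the low tag bit; the real function additionally links the last partial object of each page to the first object of the next page:

#include <stdlib.h>

#define PAGE_SIZE	4096
#define OBJ_TAG_BITS	1	/* assumed: low bit reserved for tagging */

struct link_free {
	unsigned long next;	/* (next free obj index) << OBJ_TAG_BITS */
};

static void init_free_list(void *page, unsigned int class_size)
{
	unsigned int off = 0, freeobj = 1;
	struct link_free *link = page;

	while ((off += class_size) < PAGE_SIZE) {
		link->next = freeobj++ << OBJ_TAG_BITS;
		link += class_size / sizeof(*link);
	}
	/* last slot on the page terminates the list */
	link->next = -1UL << OBJ_TAG_BITS;
}

int main(void)
{
	void *page = calloc(1, PAGE_SIZE);
	if (page)
		init_free_list(page, 128);	/* 32 slots chained 0 -> 1 -> ... */
	free(page);
	return 0;
}

Allocation then needs no separate bitmap: popping the head of this list is obj_malloc(), and obj_free() pushes a slot back.
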
1023 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
1029 int nr_pages = class->pages_per_zspage; in create_page_chain()
1046 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1047 class->pages_per_zspage == 1)) in create_page_chain()
1060 struct size_class *class, in alloc_zspage() argument
1074 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1094 create_page_chain(class, zspage, pages); in alloc_zspage()
1095 init_zspage(class, zspage); in alloc_zspage()
1100 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1106 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1222 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1224 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
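
find_get_zspage() scans the fullness lists from ZS_ALMOST_FULL down to ZS_EMPTY, so new objects land in the fullest zspage that still has room and sparse zspages get a chance to drain; zspage_full() is the stop condition. A sketch with singly linked stand-ins for the kernel's list heads:

#include <stddef.h>

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL,
		      NR_ZS_FULLNESS };

struct zspage {
	struct zspage *next;	/* stand-in for the kernel list linkage */
	int inuse;
};

struct size_class {
	struct zspage *fullness_list[NR_ZS_FULLNESS];
	int objs_per_zspage;
};

/* prefer the fullest zspage that still has room */
static struct zspage *find_get_zspage(struct size_class *class)
{
	for (int i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--)
		if (class->fullness_list[i])
			return class->fullness_list[i];
	return NULL;
}

static int zspage_full(struct size_class *class, struct zspage *zspage)
{
	return zspage->inuse == class->objs_per_zspage;
}
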
1258 struct size_class *class; in zs_map_object() local
1281 class = pool->size_class[class_idx]; in zs_map_object()
1282 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1286 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1298 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1316 struct size_class *class; in zs_unmap_object() local
1323 class = pool->size_class[class_idx]; in zs_unmap_object()
1324 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1327 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1336 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
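
Both zs_map_object() and zs_unmap_object() locate an object by pure arithmetic: the byte offset into the zspage is class->size * obj_idx, and masking with ~PAGE_MASK keeps the within-page part (lines 1282/1324). When off + class->size fits inside one page the object is mapped directly; otherwise its two halves are stitched together through a per-CPU buffer. A standalone sketch of that decision, with PAGE_SIZE fixed at 4096 for illustration:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* an object's position follows from its index alone */
static void object_location(unsigned long obj_idx, unsigned long class_size)
{
	unsigned long page_nr = class_size * obj_idx / PAGE_SIZE;
	unsigned long off = (class_size * obj_idx) & ~PAGE_MASK;

	if (off + class_size <= PAGE_SIZE)
		printf("obj %lu: page %lu off %lu, direct map\n",
		       obj_idx, page_nr, off);
	else
		printf("obj %lu: straddles pages %lu/%lu, copy via per-CPU buffer\n",
		       obj_idx, page_nr, page_nr + 1);
}

int main(void)
{
	for (unsigned long i = 0; i < 6; i++)
		object_location(i, 1040);	/* a size that won't tile a page */
	return 0;
}
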
1364 static unsigned long obj_malloc(struct size_class *class, in obj_malloc() argument
1378 offset = obj * class->size; in obj_malloc()
1398 zs_stat_inc(class, OBJ_USED, 1); in obj_malloc()
1419 struct size_class *class; in zs_malloc() local
1432 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1434 spin_lock(&class->lock); in zs_malloc()
1435 zspage = find_get_zspage(class); in zs_malloc()
1437 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1439 fix_fullness_group(class, zspage); in zs_malloc()
1441 spin_unlock(&class->lock); in zs_malloc()
1446 spin_unlock(&class->lock); in zs_malloc()
1448 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1454 spin_lock(&class->lock); in zs_malloc()
1455 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1456 newfg = get_fullness_group(class, zspage); in zs_malloc()
1457 insert_zspage(class, zspage, newfg); in zs_malloc()
1458 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1460 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1462 zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1466 spin_unlock(&class->lock); in zs_malloc()
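
Taken together, lines 1432-1466 are the whole shape of zs_malloc(): look up the class from the rounded-up size, try an existing zspage under class->lock, and only on a miss drop the lock, build a fresh zspage (page allocation may sleep), then retake the lock to install it. A userspace model with a mutex standing in for the class spinlock and the fullness lists collapsed to a single pointer (assume the lock was initialised with PTHREAD_MUTEX_INITIALIZER):

#include <stdlib.h>
#include <pthread.h>

struct zspage { int inuse; };

struct size_class {
	pthread_mutex_t lock;		/* stands in for the kernel spinlock */
	struct zspage *partial;		/* stand-in for the fullness lists */
	int objs_per_zspage;
};

static struct zspage *find_get_zspage(struct size_class *class)
{
	if (class->partial && class->partial->inuse < class->objs_per_zspage)
		return class->partial;
	return NULL;
}

/* shape of zs_malloc(): fast path under the class lock; the slow
 * path allocates a new zspage with the lock dropped */
static struct zspage *zs_malloc_model(struct size_class *class)
{
	struct zspage *zspage;

	pthread_mutex_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (zspage) {
		zspage->inuse++;		/* obj_malloc() */
		pthread_mutex_unlock(&class->lock);
		return zspage;
	}
	pthread_mutex_unlock(&class->lock);

	zspage = calloc(1, sizeof(*zspage));	/* alloc_zspage(), may sleep */
	if (!zspage)
		return NULL;

	pthread_mutex_lock(&class->lock);
	zspage->inuse++;			/* obj_malloc() */
	class->partial = zspage;		/* insert_zspage() + set_zspage_mapping() */
	pthread_mutex_unlock(&class->lock);
	return zspage;
}

The real slow path also charges pool->pages_allocated and the OBJ_ALLOCATED stat at this point (lines 1460-1462).
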
1472 static void obj_free(struct size_class *class, unsigned long obj) in obj_free() argument
1483 f_offset = (class->size * f_objidx) & ~PAGE_MASK; in obj_free()
1494 zs_stat_dec(class, OBJ_USED, 1); in obj_free()
1504 struct size_class *class; in zs_free() local
1519 class = pool->size_class[class_idx]; in zs_free()
1521 spin_lock(&class->lock); in zs_free()
1522 obj_free(class, obj); in zs_free()
1523 fullness = fix_fullness_group(class, zspage); in zs_free()
1533 free_zspage(pool, class, zspage); in zs_free()
1536 spin_unlock(&class->lock); in zs_free()
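
zs_free() is the mirror image: under class->lock, obj_free() pushes the slot back on the freelist, fix_fullness_group() reclassifies the zspage, and a zspage that ends up ZS_EMPTY is handed to free_zspage() so all of its pages go back. A compact model, with classification as in the earlier sketch:

#include <stdlib.h>

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

struct zspage { int inuse; };

static enum fullness_group regroup(struct zspage *zspage, int objs_per_zspage)
{
	if (zspage->inuse == 0)
		return ZS_EMPTY;
	if (zspage->inuse == objs_per_zspage)
		return ZS_FULL;
	return zspage->inuse <= 3 * objs_per_zspage / 4 ?
		ZS_ALMOST_EMPTY : ZS_ALMOST_FULL;
}

/* shape of zs_free(): drop the slot, reclassify, release empty zspages */
static void zs_free_model(struct zspage *zspage, int objs_per_zspage)
{
	zspage->inuse--;				/* obj_free() */
	if (regroup(zspage, objs_per_zspage) == ZS_EMPTY)
		free(zspage);				/* free_zspage() */
}
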
1542 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1552 s_size = d_size = class->size; in zs_object_copy()
1557 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1558 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1560 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1563 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1574 if (written == class->size) in zs_object_copy()
1588 s_size = class->size - written; in zs_object_copy()
1596 d_size = class->size - written; in zs_object_copy()
1609 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1619 offset += class->size * index; in find_alloced_obj()
1630 offset += class->size; in find_alloced_obj()
1652 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, in migrate_zspage() argument
1663 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1673 if (zspage_full(class, get_zspage(d_page))) { in migrate_zspage()
1680 free_obj = obj_malloc(class, get_zspage(d_page), handle); in migrate_zspage()
1681 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1692 obj_free(class, used_obj); in migrate_zspage()
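
migrate_zspage() is compaction's inner loop: find the next allocated object in the source page (find_alloced_obj), reserve a slot in the destination (obj_malloc), copy the payload (zs_object_copy), then free the source slot, stopping when the destination fills. In the kernel the user-visible handle is also rewritten to point at the new location; this sketch models the data movement only:

#include <string.h>

#define OBJS	8
#define SZ	64

struct zspage_model {
	char data[OBJS][SZ];
	int used[OBJS];
	int inuse;
};

/* drain live objects from src into dst until src empties or dst fills */
static void migrate(struct zspage_model *src, struct zspage_model *dst)
{
	for (int i = 0; i < OBJS && src->inuse; i++) {
		if (!src->used[i])
			continue;			/* find_alloced_obj() */
		if (dst->inuse == OBJS)
			return;				/* zspage_full() */

		for (int j = 0; j < OBJS; j++) {
			if (dst->used[j])
				continue;		/* obj_malloc() picks a free slot */
			memcpy(dst->data[j], src->data[i], SZ);	/* zs_object_copy() */
			dst->used[j] = 1;
			dst->inuse++;
			src->used[i] = 0;		/* obj_free() */
			src->inuse--;
			break;
		}
	}
}
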
1702 static struct zspage *isolate_zspage(struct size_class *class, bool source) in isolate_zspage() argument
1714 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1718 remove_zspage(class, zspage, fg[i]); in isolate_zspage()
1733 static enum fullness_group putback_zspage(struct size_class *class, in putback_zspage() argument
1740 fullness = get_fullness_group(class, zspage); in putback_zspage()
1741 insert_zspage(class, zspage, fullness); in putback_zspage()
1742 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1825 struct size_class *class, in putback_zspage_deferred() argument
1830 fg = putback_zspage(class, zspage); in putback_zspage_deferred()
1850 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1866 create_page_chain(class, zspage, pages); in replace_sub_page()
1876 struct size_class *class; in zs_page_isolate() local
1899 class = pool->size_class[class_idx]; in zs_page_isolate()
1901 spin_lock(&class->lock); in zs_page_isolate()
1903 spin_unlock(&class->lock); in zs_page_isolate()
1909 spin_unlock(&class->lock); in zs_page_isolate()
1920 remove_zspage(class, zspage, fullness); in zs_page_isolate()
1924 spin_unlock(&class->lock); in zs_page_isolate()
1933 struct size_class *class; in zs_page_migrate() local
1962 class = pool->size_class[class_idx]; in zs_page_migrate()
1965 spin_lock(&class->lock); in zs_page_migrate()
1983 pos += class->size; in zs_page_migrate()
1994 addr += class->size) { in zs_page_migrate()
2010 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
2026 putback_zspage_deferred(pool, class, zspage); in zs_page_migrate()
2042 addr += class->size) { in zs_page_migrate()
2052 spin_unlock(&class->lock); in zs_page_migrate()
2061 struct size_class *class; in zs_page_putback() local
2074 class = pool->size_class[class_idx]; in zs_page_putback()
2076 spin_lock(&class->lock); in zs_page_putback()
2083 putback_zspage_deferred(pool, class, zspage); in zs_page_putback()
2086 spin_unlock(&class->lock); in zs_page_putback()
2149 struct size_class *class; in async_free_zspage() local
2158 class = pool->size_class[i]; in async_free_zspage()
2159 if (class->index != i) in async_free_zspage()
2162 spin_lock(&class->lock); in async_free_zspage()
2163 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
2164 spin_unlock(&class->lock); in async_free_zspage()
2174 class = pool->size_class[class_idx]; in async_free_zspage()
2175 spin_lock(&class->lock); in async_free_zspage()
2177 spin_unlock(&class->lock); in async_free_zspage()
2208 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
2211 unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_can_compact()
2212 unsigned long obj_used = zs_stat_get(class, OBJ_USED); in zs_can_compact()
2218 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2220 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
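
zs_can_compact() turns the stats gap into a reclaim estimate: slots allocated but unused, rounded down to whole zspages, then converted to pages. A standalone version of the arithmetic from lines 2211-2220, with a worked example:

/* upper bound on pages reclaimable by compaction:
 * wasted slots -> whole zspages -> pages.
 * e.g. 500 allocated, 320 used, 51 objs/zspage, 4 pages/zspage:
 * 180 wasted slots -> 3 zspages -> 12 pages
 */
static unsigned long can_compact(unsigned long obj_allocated,
				 unsigned long obj_used,
				 int objs_per_zspage, int pages_per_zspage)
{
	unsigned long obj_wasted;

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= objs_per_zspage;

	return obj_wasted * pages_per_zspage;
}
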
2224 struct size_class *class) in __zs_compact() argument
2231 spin_lock(&class->lock); in __zs_compact()
2232 while ((src_zspage = isolate_zspage(class, true))) { in __zs_compact()
2234 if (!zs_can_compact(class)) in __zs_compact()
2240 while ((dst_zspage = isolate_zspage(class, false))) { in __zs_compact()
2246 if (!migrate_zspage(pool, class, &cc)) in __zs_compact()
2249 putback_zspage(class, dst_zspage); in __zs_compact()
2256 putback_zspage(class, dst_zspage); in __zs_compact()
2257 if (putback_zspage(class, src_zspage) == ZS_EMPTY) { in __zs_compact()
2258 free_zspage(pool, class, src_zspage); in __zs_compact()
2259 pages_freed += class->pages_per_zspage; in __zs_compact()
2261 spin_unlock(&class->lock); in __zs_compact()
2263 spin_lock(&class->lock); in __zs_compact()
2267 putback_zspage(class, src_zspage); in __zs_compact()
2269 spin_unlock(&class->lock); in __zs_compact()
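
__zs_compact() drives that inner loop per class: isolate the most nearly empty source zspage, pair it with the fullest destination that still has room, migrate until the source drains, free sources that reach ZS_EMPTY, and drop class->lock between rounds so other users can get in. A toy model in which each zspage is just an in-use count; the kernel frees class->pages_per_zspage pages per emptied zspage, while this counts zspages:

#include <stdio.h>

#define NR	6
#define CAP	8	/* objs per zspage */

/* repeatedly drain the emptiest partial zspage into the fullest one */
static int compact(int inuse[NR])
{
	int freed = 0;

	for (;;) {
		int src = -1, dst = -1;

		for (int i = 0; i < NR; i++)
			if (inuse[i] > 0 && inuse[i] < CAP &&
			    (src < 0 || inuse[i] < inuse[src]))
				src = i;	/* isolate_zspage(source) */
		for (int i = 0; i < NR; i++)
			if (i != src && inuse[i] > 0 && inuse[i] < CAP &&
			    (dst < 0 || inuse[i] > inuse[dst]))
				dst = i;	/* isolate_zspage(!source) */
		if (src < 0 || dst < 0)
			return freed;

		while (inuse[src] && inuse[dst] < CAP) {
			inuse[src]--;		/* migrate_zspage() */
			inuse[dst]++;
		}
		if (inuse[src] == 0)
			freed++;		/* free_zspage() */
	}
}

int main(void)
{
	int inuse[NR] = { 2, 7, 1, 8, 3, 0 };
	printf("zspages freed: %d\n", compact(inuse));	/* prints 2 */
	return 0;
}
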
2277 struct size_class *class; in zs_compact() local
2281 class = pool->size_class[i]; in zs_compact()
2282 if (!class) in zs_compact()
2284 if (class->index != i) in zs_compact()
2286 pages_freed += __zs_compact(pool, class); in zs_compact()
2321 struct size_class *class; in zs_shrinker_count() local
2327 class = pool->size_class[i]; in zs_shrinker_count()
2328 if (!class) in zs_shrinker_count()
2330 if (class->index != i) in zs_shrinker_count()
2333 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2395 struct size_class *class; in zs_create_pool() local
2441 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2442 if (!class) in zs_create_pool()
2445 class->size = size; in zs_create_pool()
2446 class->index = i; in zs_create_pool()
2447 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2448 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2449 spin_lock_init(&class->lock); in zs_create_pool()
2450 pool->size_class[i] = class; in zs_create_pool()
2453 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2455 prev_class = class; in zs_create_pool()
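
zs_create_pool() builds one size_class per index: size steps up from the minimum in fixed deltas, pages_per_zspage is chosen to minimise the tail waste of carving that size out of a 1-4 page zspage, and objs_per_zspage follows from it; the prev_class bookkeeping at line 2455 lets consecutive classes with identical geometry share one size_class. A standalone sketch of the geometry choice, assuming PAGE_SIZE 4096 and a maximum of 4 pages per zspage:

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_MAX_PAGES_PER_ZSPAGE	4	/* assumed: 1 << ZS_MAX_ZSPAGE_ORDER */

/* pick the zspage size (in pages) that wastes the least space when
 * carved into class_size objects */
static int get_pages_per_zspage(int class_size)
{
	int max_usedpc = 0, max_usedpc_order = 1;

	for (int i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int waste = zspage_size % class_size;
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}
	return max_usedpc_order;
}

int main(void)
{
	for (int size = 32; size <= 4096; size += 992) {
		int pages = get_pages_per_zspage(size);
		printf("class size %4d: %d page(s), %d objs per zspage\n",
		       size, pages, pages * PAGE_SIZE / size);
	}
	return 0;
}

Class size 3008 shows why this matters: a single page would waste 1088 bytes, while a 3-page zspage holds four such objects with only 256 bytes of tail waste.
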
2490 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2492 if (!class) in zs_destroy_pool()
2495 if (class->index != i) in zs_destroy_pool()
2499 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2501 class->size, fg); in zs_destroy_pool()
2504 kfree(class); in zs_destroy_pool()