Lines Matching +full:page +full:- +full:size

10  * Released under the terms of 3-clause BSD License
16 * struct page(s) to form a zspage.
18 * Usage of struct page fields:
19 * page->private: points to zspage
20 * page->freelist(index): links together all component pages of a zspage
21 * For the huge page, this is always 0, so we use this field
23 * page->units: first object offset in a subpage of zspage
25 * Usage of struct page flags:
26 * PG_private: identifies the first component page
27 * PG_owner_priv_1: identifies the huge component page
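The field mapping above is what the page-walking helpers further down in this listing (get_first_page(), get_next_page(), PageHugeObject()) rely on. As a minimal sketch, a hypothetical helper (not in zsmalloc.c) could walk one zspage's component pages like this, assuming those helpers behave as shown later:

/* Hypothetical helper: count the 0-order pages backing one zspage by
 * following the page->freelist chain described above. */
static int count_component_pages(struct zspage *zspage)
{
        struct page *page = get_first_page(zspage);     /* first page has PG_private set */
        int nr = 0;

        do {
                nr++;
                /* NULL for a huge zspage or at the end of the chain */
                page = get_next_page(page);
        } while (page);

        return nr;
}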
65 * span more than 1 page, which avoids the complex case of mapping 2 pages simply
71 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
100 #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
104 * encoding <page, obj_idx> and the encoded value has room
115 * header keeps a handle, which is a 4-byte-aligned address, so we
120 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
121 #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
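A handle value therefore packs the page frame number in the high bits and the object index in the low bits, with OBJ_TAG_BITS reserved at the bottom for the allocated tag. A minimal sketch of the round trip, mirroring the location_to_obj() and obj_to_location() fragments that appear later in this listing (treat it as illustrative, not the exact implementation):

/* Sketch of the <page, obj_idx> encoding used by zsmalloc handles. */
static unsigned long encode_obj(struct page *page, unsigned int obj_idx)
{
        unsigned long obj;

        obj = page_to_pfn(page) << OBJ_INDEX_BITS;      /* PFN in the high bits */
        obj |= obj_idx & OBJ_INDEX_MASK;                /* index in the low bits */
        obj <<= OBJ_TAG_BITS;                           /* room for the allocated tag */

        return obj;
}

static void decode_obj(unsigned long obj, struct page **page, unsigned int *obj_idx)
{
        obj >>= OBJ_TAG_BITS;
        *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
        *obj_idx = obj & OBJ_INDEX_MASK;
}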
136 * On systems with 4K page size, this gives 255 size classes! There is a
137 * trade-off here:
138 * - A large number of size classes is potentially wasteful as free pages are
140 * - A small number of size classes causes large internal fragmentation
141 * - Probably it's better to use specific size classes (empirically
149 #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
183 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
203 * Size of objects stored in this class. Must be multiple
206 int size; member
216 static void SetPageHugeObject(struct page *page) in SetPageHugeObject() argument
218 SetPageOwnerPriv1(page); in SetPageHugeObject()
221 static void ClearPageHugeObject(struct page *page) in ClearPageHugeObject() argument
223 ClearPageOwnerPriv1(page); in ClearPageHugeObject()
226 static int PageHugeObject(struct page *page) in PageHugeObject() argument
228 return PageOwnerPriv1(page); in PageHugeObject()
233 * For every zspage, zspage->freeobj gives the head of this list.
241 * It's valid for a non-allocated object
287 struct page *first_page;
328 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, in create_cache()
330 if (!pool->handle_cachep) in create_cache()
333 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), in create_cache()
335 if (!pool->zspage_cachep) { in create_cache()
336 kmem_cache_destroy(pool->handle_cachep); in create_cache()
337 pool->handle_cachep = NULL; in create_cache()
346 kmem_cache_destroy(pool->handle_cachep); in destroy_cache()
347 kmem_cache_destroy(pool->zspage_cachep); in destroy_cache()
352 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, in cache_alloc_handle()
358 kmem_cache_free(pool->handle_cachep, (void *)handle); in cache_free_handle()
363 return kmem_cache_alloc(pool->zspage_cachep, in cache_alloc_zspage()
369 kmem_cache_free(pool->zspage_cachep, zspage); in cache_free_zspage()
403 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc() argument
406 *handle = zs_malloc(pool, size, gfp); in zs_zpool_malloc()
407 return *handle ? 0 : -1; in zs_zpool_malloc()
456 MODULE_ALIAS("zpool-zsmalloc");
459 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
464 return zspage->isolated; in is_zspage_isolated()
467 static __maybe_unused int is_first_page(struct page *page) in is_first_page() argument
469 return PagePrivate(page); in is_first_page()
472 /* Protected by class->lock */
475 return zspage->inuse; in get_zspage_inuse()
480 zspage->inuse = val; in set_zspage_inuse()
485 zspage->inuse += val; in mod_zspage_inuse()
488 static inline struct page *get_first_page(struct zspage *zspage) in get_first_page()
490 struct page *first_page = zspage->first_page; in get_first_page()
496 static inline int get_first_obj_offset(struct page *page) in get_first_obj_offset() argument
498 return page->units; in get_first_obj_offset()
501 static inline void set_first_obj_offset(struct page *page, int offset) in set_first_obj_offset() argument
503 page->units = offset; in set_first_obj_offset()
508 return zspage->freeobj; in get_freeobj()
513 zspage->freeobj = obj; in set_freeobj()
520 BUG_ON(zspage->magic != ZSPAGE_MAGIC); in get_zspage_mapping()
522 *fullness = zspage->fullness; in get_zspage_mapping()
523 *class_idx = zspage->class; in get_zspage_mapping()
530 zspage->class = class_idx; in set_zspage_mapping()
531 zspage->fullness = fullness; in set_zspage_mapping()
535 * zsmalloc divides the pool into various size classes where each
538 * classes depending on its size. This function returns the index of the
539 * size class which has a chunk size big enough to hold the given size.
541 static int get_size_class_index(int size) in get_size_class_index() argument
545 if (likely(size > ZS_MIN_ALLOC_SIZE)) in get_size_class_index()
546 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, in get_size_class_index()
549 return min_t(int, ZS_SIZE_CLASSES - 1, idx); in get_size_class_index()
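Taken together with the trade-off comment earlier in this listing, the arithmetic works out as follows on a 4K-page system; a worked example, assuming the usual values of the macros involved:

/* Worked example (assumed values: PAGE_SIZE = 4096, ZS_MIN_ALLOC_SIZE = 32,
 * ZS_SIZE_CLASS_DELTA = 16):
 *   ZS_SIZE_CLASSES           = DIV_ROUND_UP(4096 - 32, 16) + 1 = 254 + 1 = 255
 *   get_size_class_index(100) = DIV_ROUND_UP(100 - 32, 16)      = 5
 * i.e. a 100-byte request is served from the 32 + 5 * 16 = 112-byte class.
 */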
556 class->stats.objs[type] += cnt; in zs_stat_inc()
563 class->stats.objs[type] -= cnt; in zs_stat_dec()
570 return class->stats.objs[type]; in zs_stat_get()
597 struct zs_pool *pool = s->private; in zs_stats_size_show()
607 "class", "size", "almost_full", "almost_empty", in zs_stats_size_show()
612 class = pool->size_class[i]; in zs_stats_size_show()
614 if (class->index != i) in zs_stats_size_show()
617 spin_lock(&class->lock); in zs_stats_size_show()
623 spin_unlock(&class->lock); in zs_stats_size_show()
625 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
627 class->pages_per_zspage; in zs_stats_size_show()
631 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
633 class->pages_per_zspage, freeable); in zs_stats_size_show()
667 pool->stat_dentry = entry; in zs_pool_stat_create()
670 pool->stat_dentry, pool, in zs_pool_stat_create()
675 debugfs_remove_recursive(pool->stat_dentry); in zs_pool_stat_create()
676 pool->stat_dentry = NULL; in zs_pool_stat_create()
682 debugfs_remove_recursive(pool->stat_dentry); in zs_pool_stat_destroy()
705 * For each size class, zspages are divided into different groups
709 * status of the given page.
718 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
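Concretely, the grouping is a comparison of the zspage's ->inuse counter against class->objs_per_zspage. A minimal sketch of the decision, following the "n <= N / f" rule quoted earlier in this listing; the ALMOST_EMPTY cut-off below is an illustrative constant, not necessarily the exact fraction the real get_fullness_group() uses:

/* Sketch only; mirrors the shape of get_fullness_group(). */
static enum fullness_group classify_zspage(struct size_class *class,
                                           struct zspage *zspage)
{
        int inuse = get_zspage_inuse(zspage);
        int objs_per_zspage = class->objs_per_zspage;

        if (inuse == 0)
                return ZS_EMPTY;
        if (inuse == objs_per_zspage)
                return ZS_FULL;
        if (inuse <= 3 * objs_per_zspage / 4)   /* n <= N / f, illustrative f */
                return ZS_ALMOST_EMPTY;
        return ZS_ALMOST_FULL;
}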
733 * Each size class maintains various freelists and zspages are assigned
745 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
749 * Put pages with higher ->inuse first. in insert_zspage()
753 list_add(&zspage->list, &head->list); in insert_zspage()
757 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
768 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
771 list_del_init(&zspage->list); in remove_zspage()
776 * Each size class maintains zspages in different fullness groups depending
778 * objects, the fullness status of the page can change, say, from ALMOST_FULL
780 * a status change has occurred for the given page and accordingly moves the
781 * page from the freelist of the old fullness group to that of the new
808 * to form a zspage for each size class. This is important
812 * usage = Zp - wastage
813 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
815 * For example, for size class of 3/8 * PAGE_SIZE, we should
822 /* zspage order which gives maximum used size per KB */ in get_pages_per_zspage()
831 usedpc = (zspage_size - waste) * 100 / zspage_size; in get_pages_per_zspage()
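In other words, for each candidate order k the code computes the usable share of k * PAGE_SIZE after subtracting the leftover k * PAGE_SIZE % class_size, and keeps the k that maximizes it. A sketch of that selection loop, mirroring the shape of get_pages_per_zspage() (illustrative rather than a verbatim copy):

/* Pick k in 1..ZS_MAX_PAGES_PER_ZSPAGE that maximizes used bytes per zspage.
 * E.g. for class_size = 3/8 * PAGE_SIZE, k = 3 wastes nothing: exactly
 * 8 objects fit into the 3-page zspage. */
static int pick_pages_per_zspage(int class_size)
{
        int i, max_usedpc = 0, max_usedpc_order = 1;

        for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
                int zspage_size = i * PAGE_SIZE;
                int waste = zspage_size % class_size;
                int usedpc = (zspage_size - waste) * 100 / zspage_size;

                if (usedpc > max_usedpc) {
                        max_usedpc = usedpc;
                        max_usedpc_order = i;
                }
        }
        return max_usedpc_order;
}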
842 static struct zspage *get_zspage(struct page *page) in get_zspage() argument
844 struct zspage *zspage = (struct zspage *)page->private; in get_zspage()
846 BUG_ON(zspage->magic != ZSPAGE_MAGIC); in get_zspage()
850 static struct page *get_next_page(struct page *page) in get_next_page() argument
852 if (unlikely(PageHugeObject(page))) in get_next_page()
855 return page->freelist; in get_next_page()
859 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
861 * @page: page of the zspage that the object resides in
864 static void obj_to_location(unsigned long obj, struct page **page, in obj_to_location() argument
868 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); in obj_to_location()
873 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
874 * @page: page of the zspage that the object resides in
877 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) in location_to_obj() argument
881 obj = page_to_pfn(page) << OBJ_INDEX_BITS; in location_to_obj()
893 static unsigned long obj_to_head(struct page *page, void *obj) in obj_to_head() argument
895 if (unlikely(PageHugeObject(page))) { in obj_to_head()
896 VM_BUG_ON_PAGE(!is_first_page(page), page); in obj_to_head()
897 return page->index; in obj_to_head()
922 static void reset_page(struct page *page) in reset_page() argument
924 __ClearPageMovable(page); in reset_page()
925 ClearPagePrivate(page); in reset_page()
926 set_page_private(page, 0); in reset_page()
927 page_mapcount_reset(page); in reset_page()
928 ClearPageHugeObject(page); in reset_page()
929 page->freelist = NULL; in reset_page()
934 struct page *cursor, *fail; in trylock_zspage()
956 struct page *page, *next; in __free_zspage() local
962 assert_spin_locked(&class->lock); in __free_zspage()
967 next = page = get_first_page(zspage); in __free_zspage()
969 VM_BUG_ON_PAGE(!PageLocked(page), page); in __free_zspage()
970 next = get_next_page(page); in __free_zspage()
971 reset_page(page); in __free_zspage()
972 unlock_page(page); in __free_zspage()
973 dec_zone_page_state(page, NR_ZSPAGES); in __free_zspage()
974 put_page(page); in __free_zspage()
975 page = next; in __free_zspage()
976 } while (page != NULL); in __free_zspage()
980 zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
981 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
982 &pool->pages_allocated); in __free_zspage()
989 VM_BUG_ON(list_empty(&zspage->list)); in free_zspage()
1005 struct page *page = get_first_page(zspage); in init_zspage() local
1007 while (page) { in init_zspage()
1008 struct page *next_page; in init_zspage()
1012 set_first_obj_offset(page, off); in init_zspage()
1014 vaddr = kmap_atomic(page); in init_zspage()
1017 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
1018 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
1019 link += class->size / sizeof(*link); in init_zspage()
1024 * page, which must point to the first object on the next in init_zspage()
1025 * page (if present) in init_zspage()
1027 next_page = get_next_page(page); in init_zspage()
1029 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
1035 link->next = -1UL << OBJ_TAG_BITS; in init_zspage()
1038 page = next_page; in init_zspage()
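Putting the init_zspage() fragments together: each free slot's link_free header stores the index of the next free object shifted left by OBJ_TAG_BITS, and the very last slot of the zspage stores -1UL << OBJ_TAG_BITS as an end marker. A worked layout for a hypothetical 1024-byte class, which needs only one 4K page per zspage:

/* Hypothetical 1024-byte class, PAGE_SIZE = 4096, single-page zspage:
 *   obj 0 at offset    0: link->next = 1 << OBJ_TAG_BITS
 *   obj 1 at offset 1024: link->next = 2 << OBJ_TAG_BITS
 *   obj 2 at offset 2048: link->next = 3 << OBJ_TAG_BITS
 *   obj 3 at offset 3072: link->next = -1UL << OBJ_TAG_BITS  (end marker)
 * zspage->freeobj is initialized to 0, so allocations pop objects 0, 1, 2, 3
 * in order until the zspage is full.
 */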
1046 struct page *pages[]) in create_page_chain()
1049 struct page *page; in create_page_chain() local
1050 struct page *prev_page = NULL; in create_page_chain()
1051 int nr_pages = class->pages_per_zspage; in create_page_chain()
1055 * 1. all pages are linked together using page->freelist in create_page_chain()
1056 * 2. each sub-page points to the zspage using page->private in create_page_chain()
1058 * we set PG_private to identify the first page (i.e. no other sub-page in create_page_chain()
1062 page = pages[i]; in create_page_chain()
1063 set_page_private(page, (unsigned long)zspage); in create_page_chain()
1064 page->freelist = NULL; in create_page_chain()
1066 zspage->first_page = page; in create_page_chain()
1067 SetPagePrivate(page); in create_page_chain()
1068 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1069 class->pages_per_zspage == 1)) in create_page_chain()
1070 SetPageHugeObject(page); in create_page_chain()
1072 prev_page->freelist = page; in create_page_chain()
1074 prev_page = page; in create_page_chain()
1079 * Allocate a zspage for the given size class
1086 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; in alloc_zspage()
1093 zspage->magic = ZSPAGE_MAGIC; in alloc_zspage()
1096 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1097 struct page *page; in alloc_zspage() local
1099 page = alloc_page(gfp); in alloc_zspage()
1100 if (!page) { in alloc_zspage()
1101 while (--i >= 0) { in alloc_zspage()
1109 inc_zone_page_state(page, NR_ZSPAGES); in alloc_zspage()
1110 pages[i] = page; in alloc_zspage()
1124 for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) { in find_get_zspage()
1125 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1141 if (area->vm) in __zs_cpu_up()
1143 area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); in __zs_cpu_up()
1144 if (!area->vm) in __zs_cpu_up()
1145 return -ENOMEM; in __zs_cpu_up()
1151 if (area->vm) in __zs_cpu_down()
1152 free_vm_area(area->vm); in __zs_cpu_down()
1153 area->vm = NULL; in __zs_cpu_down()
1157 struct page *pages[2], int off, int size) in __zs_map_object() argument
1159 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); in __zs_map_object()
1160 area->vm_addr = area->vm->addr; in __zs_map_object()
1161 return area->vm_addr + off; in __zs_map_object()
1165 struct page *pages[2], int off, int size) in __zs_unmap_object() argument
1167 unsigned long addr = (unsigned long)area->vm_addr; in __zs_unmap_object()
1180 if (area->vm_buf) in __zs_cpu_up()
1182 area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); in __zs_cpu_up()
1183 if (!area->vm_buf) in __zs_cpu_up()
1184 return -ENOMEM; in __zs_cpu_up()
1190 kfree(area->vm_buf); in __zs_cpu_down()
1191 area->vm_buf = NULL; in __zs_cpu_down()
1195 struct page *pages[2], int off, int size) in __zs_map_object() argument
1199 char *buf = area->vm_buf; in __zs_map_object()
1201 /* disable page faults to match kmap_atomic() return conditions */ in __zs_map_object()
1205 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1208 sizes[0] = PAGE_SIZE - off; in __zs_map_object()
1209 sizes[1] = size - sizes[0]; in __zs_map_object()
1211 /* copy object to per-cpu buffer */ in __zs_map_object()
1219 return area->vm_buf; in __zs_map_object()
1223 struct page *pages[2], int off, int size) in __zs_unmap_object() argument
1230 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1233 buf = area->vm_buf; in __zs_unmap_object()
1235 size -= ZS_HANDLE_SIZE; in __zs_unmap_object()
1238 sizes[0] = PAGE_SIZE - off; in __zs_unmap_object()
1239 sizes[1] = size - sizes[0]; in __zs_unmap_object()
1241 /* copy per-cpu buffer to object */ in __zs_unmap_object()
1250 /* enable page faults to match kunmap_atomic() return conditions */ in __zs_unmap_object()
1276 if (prev->pages_per_zspage == pages_per_zspage && in can_merge()
1277 prev->objs_per_zspage == objs_per_zspage) in can_merge()
1285 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1290 return atomic_long_read(&pool->pages_allocated); in zs_get_total_pages()
1295 * zs_map_object - get address of allocated object from handle.
1307 * This function returns with preemption and page faults disabled.
1313 struct page *page; in zs_map_object() local
1321 struct page *pages[2]; in zs_map_object()
1325 * Because we use per-cpu mapping areas shared among the in zs_map_object()
1335 obj_to_location(obj, &page, &obj_idx); in zs_map_object()
1336 zspage = get_zspage(page); in zs_map_object()
1342 class = pool->size_class[class_idx]; in zs_map_object()
1343 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1346 area->vm_mm = mm; in zs_map_object()
1347 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1348 /* this object is contained entirely within a page */ in zs_map_object()
1349 area->vm_addr = kmap_atomic(page); in zs_map_object()
1350 ret = area->vm_addr + off; in zs_map_object()
1355 pages[0] = page; in zs_map_object()
1356 pages[1] = get_next_page(page); in zs_map_object()
1359 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1361 if (likely(!PageHugeObject(page))) in zs_map_object()
1371 struct page *page; in zs_unmap_object() local
1381 obj_to_location(obj, &page, &obj_idx); in zs_unmap_object()
1382 zspage = get_zspage(page); in zs_unmap_object()
1384 class = pool->size_class[class_idx]; in zs_unmap_object()
1385 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1388 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1389 kunmap_atomic(area->vm_addr); in zs_unmap_object()
1391 struct page *pages[2]; in zs_unmap_object()
1393 pages[0] = page; in zs_unmap_object()
1394 pages[1] = get_next_page(page); in zs_unmap_object()
1397 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
1407 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1411 * The function returns the size of the first huge class - any object of equal
1412 * or bigger size will be stored in a zspage consisting of a single physical
1413 * page.
1417 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
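For orientation, the exported API that all of these internals back is small: zs_create_pool(), zs_malloc(), zs_map_object()/zs_unmap_object(), zs_free() and zs_destroy_pool(). A minimal, hedged usage sketch from a caller's point of view; the function and flag names are the real exports from <linux/zsmalloc.h>, but the surrounding demo function is illustrative only and error handling is trimmed:

/* Illustrative caller of the exported zsmalloc API; not part of zsmalloc.c. */
#include <linux/zsmalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/string.h>

static int zsmalloc_demo(void)
{
        struct zs_pool *pool;
        unsigned long handle;
        char payload[64] = "compressed data";
        void *dst;

        pool = zs_create_pool("demo");
        if (!pool)
                return -ENOMEM;

        handle = zs_malloc(pool, sizeof(payload), GFP_KERNEL);
        if (!handle) {
                zs_destroy_pool(pool);
                return -ENOMEM;
        }

        /* Handles are not pointers: map the object before touching it. */
        dst = zs_map_object(pool, handle, ZS_MM_WO);
        memcpy(dst, payload, sizeof(payload));
        zs_unmap_object(pool, handle);  /* re-enables preemption/page faults */

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
}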
1432 struct page *m_page; in obj_malloc()
1439 offset = obj * class->size; in obj_malloc()
1449 set_freeobj(zspage, link->next >> OBJ_TAG_BITS); in obj_malloc()
1452 link->handle = handle; in obj_malloc()
1454 /* record handle to page->index */ in obj_malloc()
1455 zspage->first_page->index = handle; in obj_malloc()
1468 * zs_malloc - Allocate block of given size from pool.
1470 * @size: size of block to allocate
1475 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1477 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) in zs_malloc() argument
1484 if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) in zs_malloc()
1492 size += ZS_HANDLE_SIZE; in zs_malloc()
1493 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1495 spin_lock(&class->lock); in zs_malloc()
1502 spin_unlock(&class->lock); in zs_malloc()
1507 spin_unlock(&class->lock); in zs_malloc()
1515 spin_lock(&class->lock); in zs_malloc()
1519 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1521 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1522 &pool->pages_allocated); in zs_malloc()
1523 zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1527 spin_unlock(&class->lock); in zs_malloc()
1537 struct page *f_page; in obj_free()
1544 f_offset = (class->size * f_objidx) & ~PAGE_MASK; in obj_free()
1551 link->next = get_freeobj(zspage) << OBJ_TAG_BITS; in obj_free()
1554 mod_zspage_inuse(zspage, -1); in obj_free()
1561 struct page *f_page; in zs_free()
1580 class = pool->size_class[class_idx]; in zs_free()
1582 spin_lock(&class->lock); in zs_free()
1597 spin_unlock(&class->lock); in zs_free()
1606 struct page *s_page, *d_page; in zs_object_copy()
1610 int s_size, d_size, size; in zs_object_copy() local
1613 s_size = d_size = class->size; in zs_object_copy()
1618 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1619 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1621 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1622 s_size = PAGE_SIZE - s_off; in zs_object_copy()
1624 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1625 d_size = PAGE_SIZE - d_off; in zs_object_copy()
1631 size = min(s_size, d_size); in zs_object_copy()
1632 memcpy(d_addr + d_off, s_addr + s_off, size); in zs_object_copy()
1633 written += size; in zs_object_copy()
1635 if (written == class->size) in zs_object_copy()
1638 s_off += size; in zs_object_copy()
1639 s_size -= size; in zs_object_copy()
1640 d_off += size; in zs_object_copy()
1641 d_size -= size; in zs_object_copy()
1649 s_size = class->size - written; in zs_object_copy()
1657 d_size = class->size - written; in zs_object_copy()
1671 struct page *page, int *obj_idx) in find_alloced_obj() argument
1677 void *addr = kmap_atomic(page); in find_alloced_obj()
1679 offset = get_first_obj_offset(page); in find_alloced_obj()
1680 offset += class->size * index; in find_alloced_obj()
1683 head = obj_to_head(page, addr + offset); in find_alloced_obj()
1691 offset += class->size; in find_alloced_obj()
1704 struct page *s_page;
1705 /* Destination page for migration which should be the first page
1707 struct page *d_page;
1718 struct page *s_page = cc->s_page; in migrate_zspage()
1719 struct page *d_page = cc->d_page; in migrate_zspage()
1720 int obj_idx = cc->obj_idx; in migrate_zspage()
1736 ret = -ENOMEM; in migrate_zspage()
1757 cc->s_page = s_page; in migrate_zspage()
1758 cc->obj_idx = obj_idx; in migrate_zspage()
1775 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1788 * putback_zspage - add @zspage into the right class's fullness list
1790 * @zspage: target page
1803 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1815 struct page *page = get_first_page(zspage); in lock_zspage() local
1818 lock_page(page); in lock_zspage()
1819 } while ((page = get_next_page(page)) != NULL); in lock_zspage()
1856 rwlock_init(&zspage->lock); in migrate_lock_init()
1861 read_lock(&zspage->lock); in migrate_read_lock()
1866 read_unlock(&zspage->lock); in migrate_read_unlock()
1871 write_lock(&zspage->lock); in migrate_write_lock()
1876 write_unlock(&zspage->lock); in migrate_write_unlock()
1879 /* Number of isolated subpages for *page migration* in this zspage */
1882 zspage->isolated++; in inc_zspage_isolation()
1887 zspage->isolated--; in dec_zspage_isolation()
1898 schedule_work(&pool->free_work); in putback_zspage_deferred()
1904 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0); in zs_pool_dec_isolated()
1905 atomic_long_dec(&pool->isolated_pages); in zs_pool_dec_isolated()
1908 * checks the isolated count under &class->lock after enqueuing in zs_pool_dec_isolated()
1911 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying) in zs_pool_dec_isolated()
1912 wake_up_all(&pool->migration_wait); in zs_pool_dec_isolated()
1916 struct page *newpage, struct page *oldpage) in replace_sub_page()
1918 struct page *page; in replace_sub_page() local
1919 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; in replace_sub_page()
1922 page = get_first_page(zspage); in replace_sub_page()
1924 if (page == oldpage) in replace_sub_page()
1927 pages[idx] = page; in replace_sub_page()
1929 } while ((page = get_next_page(page)) != NULL); in replace_sub_page()
1934 newpage->index = oldpage->index; in replace_sub_page()
1938 static bool zs_page_isolate(struct page *page, isolate_mode_t mode) in zs_page_isolate() argument
1948 * Page is locked so the zspage cannot be destroyed. For details, look at in zs_page_isolate()
1951 VM_BUG_ON_PAGE(!PageMovable(page), page); in zs_page_isolate()
1952 VM_BUG_ON_PAGE(PageIsolated(page), page); in zs_page_isolate()
1954 zspage = get_zspage(page); in zs_page_isolate()
1958 * because class_idx is constant unless page is freed so we should get in zs_page_isolate()
1962 mapping = page_mapping(page); in zs_page_isolate()
1963 pool = mapping->private_data; in zs_page_isolate()
1964 class = pool->size_class[class_idx]; in zs_page_isolate()
1966 spin_lock(&class->lock); in zs_page_isolate()
1968 spin_unlock(&class->lock); in zs_page_isolate()
1973 if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { in zs_page_isolate()
1974 spin_unlock(&class->lock); in zs_page_isolate()
1982 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { in zs_page_isolate()
1984 atomic_long_inc(&pool->isolated_pages); in zs_page_isolate()
1989 spin_unlock(&class->lock); in zs_page_isolate()
1994 static int zs_page_migrate(struct address_space *mapping, struct page *newpage, in zs_page_migrate()
1995 struct page *page, enum migrate_mode mode) in zs_page_migrate() argument
2002 struct page *dummy; in zs_page_migrate()
2008 int ret = -EAGAIN; in zs_page_migrate()
2016 return -EINVAL; in zs_page_migrate()
2018 VM_BUG_ON_PAGE(!PageMovable(page), page); in zs_page_migrate()
2019 VM_BUG_ON_PAGE(!PageIsolated(page), page); in zs_page_migrate()
2021 zspage = get_zspage(page); in zs_page_migrate()
2026 pool = mapping->private_data; in zs_page_migrate()
2027 class = pool->size_class[class_idx]; in zs_page_migrate()
2028 offset = get_first_obj_offset(page); in zs_page_migrate()
2030 spin_lock(&class->lock); in zs_page_migrate()
2033 * Set "offset" to the end of the page so that every loop in zs_page_migrate()
2040 s_addr = kmap_atomic(page); in zs_page_migrate()
2042 head = obj_to_head(page, s_addr + pos); in zs_page_migrate()
2048 pos += class->size; in zs_page_migrate()
2059 addr += class->size) { in zs_page_migrate()
2060 head = obj_to_head(page, addr); in zs_page_migrate()
2075 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
2081 * Page migration is done, so let's put back the isolated zspage to in zs_page_migrate()
2082 * the list if @page is the final isolated subpage in the zspage. in zs_page_migrate()
2088 * Also, we ensure that everyone can see pool->destroying before in zs_page_migrate()
2095 if (page_zone(newpage) != page_zone(page)) { in zs_page_migrate()
2096 dec_zone_page_state(page, NR_ZSPAGES); in zs_page_migrate()
2100 reset_page(page); in zs_page_migrate()
2101 put_page(page); in zs_page_migrate()
2102 page = newpage; in zs_page_migrate()
2107 addr += class->size) { in zs_page_migrate()
2108 head = obj_to_head(page, addr); in zs_page_migrate()
2117 spin_unlock(&class->lock); in zs_page_migrate()
2123 static void zs_page_putback(struct page *page) in zs_page_putback() argument
2132 VM_BUG_ON_PAGE(!PageMovable(page), page); in zs_page_putback()
2133 VM_BUG_ON_PAGE(!PageIsolated(page), page); in zs_page_putback()
2135 zspage = get_zspage(page); in zs_page_putback()
2137 mapping = page_mapping(page); in zs_page_putback()
2138 pool = mapping->private_data; in zs_page_putback()
2139 class = pool->size_class[class_idx]; in zs_page_putback()
2141 spin_lock(&class->lock); in zs_page_putback()
2151 spin_unlock(&class->lock); in zs_page_putback()
2162 pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb); in zs_register_migration()
2163 if (IS_ERR(pool->inode)) { in zs_register_migration()
2164 pool->inode = NULL; in zs_register_migration()
2168 pool->inode->i_mapping->private_data = pool; in zs_register_migration()
2169 pool->inode->i_mapping->a_ops = &zsmalloc_aops; in zs_register_migration()
2175 return atomic_long_read(&pool->isolated_pages) == 0; in pool_isolated_are_drained()
2188 wait_event(pool->migration_wait, in wait_for_isolated_drain()
2194 pool->destroying = true; in zs_unregister_migration()
2197 * pool->destroying. Thus pool->isolated pages will either be 0 in which in zs_unregister_migration()
2198 * case we don't care, or it will be > 0 and pool->destroying will in zs_unregister_migration()
2203 flush_work(&pool->free_work); in zs_unregister_migration()
2204 iput(pool->inode); in zs_unregister_migration()
2223 class = pool->size_class[i]; in async_free_zspage()
2224 if (class->index != i) in async_free_zspage()
2227 spin_lock(&class->lock); in async_free_zspage()
2228 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
2229 spin_unlock(&class->lock); in async_free_zspage()
2234 list_del(&zspage->list); in async_free_zspage()
2239 class = pool->size_class[class_idx]; in async_free_zspage()
2240 spin_lock(&class->lock); in async_free_zspage()
2241 __free_zspage(pool, pool->size_class[class_idx], zspage); in async_free_zspage()
2242 spin_unlock(&class->lock); in async_free_zspage()
2248 schedule_work(&pool->free_work); in kick_deferred_free()
2253 INIT_WORK(&pool->free_work, async_free_zspage); in init_deferred_free()
2258 struct page *page = get_first_page(zspage); in SetZsPageMovable() local
2261 WARN_ON(!trylock_page(page)); in SetZsPageMovable()
2262 __SetPageMovable(page, pool->inode->i_mapping); in SetZsPageMovable()
2263 unlock_page(page); in SetZsPageMovable()
2264 } while ((page = get_next_page(page)) != NULL); in SetZsPageMovable()
2282 obj_wasted = obj_allocated - obj_used; in zs_can_compact()
2283 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2285 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
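Reading the three zs_can_compact() fragments together: the gap between allocated and used objects is converted into whole zspages, then into a page count. A worked example with hypothetical numbers:

/* Hypothetical numbers:
 *   class->objs_per_zspage  = 8
 *   class->pages_per_zspage = 3
 *   obj_allocated = 80, obj_used = 50
 *   obj_wasted = 80 - 50 = 30;  30 / 8 = 3 reclaimable zspages
 *   => zs_can_compact() reports 3 * 3 = 9 freeable pages
 */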
2294 spin_lock(&class->lock); in __zs_compact()
2322 pool->stats.pages_compacted += class->pages_per_zspage; in __zs_compact()
2324 spin_unlock(&class->lock); in __zs_compact()
2326 spin_lock(&class->lock); in __zs_compact()
2332 spin_unlock(&class->lock); in __zs_compact()
2340 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_compact()
2341 class = pool->size_class[i]; in zs_compact()
2344 if (class->index != i) in zs_compact()
2349 return pool->stats.pages_compacted; in zs_compact()
2355 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); in zs_pool_stats()
2366 pages_freed = pool->stats.pages_compacted; in zs_shrinker_scan()
2372 pages_freed = zs_compact(pool) - pages_freed; in zs_shrinker_scan()
2386 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_shrinker_count()
2387 class = pool->size_class[i]; in zs_shrinker_count()
2390 if (class->index != i) in zs_shrinker_count()
2401 unregister_shrinker(&pool->shrinker); in zs_unregister_shrinker()
2406 pool->shrinker.scan_objects = zs_shrinker_scan; in zs_register_shrinker()
2407 pool->shrinker.count_objects = zs_shrinker_count; in zs_register_shrinker()
2408 pool->shrinker.batch = 0; in zs_register_shrinker()
2409 pool->shrinker.seeks = DEFAULT_SEEKS; in zs_register_shrinker()
2411 return register_shrinker(&pool->shrinker); in zs_register_shrinker()
2415 * zs_create_pool - Creates an allocation pool to work from.
2436 pool->name = kstrdup(name, GFP_KERNEL); in zs_create_pool()
2437 if (!pool->name) in zs_create_pool()
2441 init_waitqueue_head(&pool->migration_wait); in zs_create_pool()
2448 * Iterate in reverse because the size of the size_class that we want to use in zs_create_pool()
2449 * for merging should be larger than or equal to the current size. in zs_create_pool()
2451 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_create_pool()
2452 int size; in zs_create_pool() local
2458 size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; in zs_create_pool()
2459 if (size > ZS_MAX_ALLOC_SIZE) in zs_create_pool()
2460 size = ZS_MAX_ALLOC_SIZE; in zs_create_pool()
2461 pages_per_zspage = get_pages_per_zspage(size); in zs_create_pool()
2462 objs_per_zspage = pages_per_zspage * PAGE_SIZE / size; in zs_create_pool()
2466 * so huge_class_size holds the size of the first huge in zs_create_pool()
2472 huge_class_size = size; in zs_create_pool()
2476 * unconditionally adds handle size before it performs in zs_create_pool()
2477 * size class search - so an object may be smaller than in zs_create_pool()
2478 * the huge class size, yet it can still end up in the huge in zs_create_pool()
2482 huge_class_size -= (ZS_HANDLE_SIZE - 1); in zs_create_pool()
2487 * as alloc/free for that size. Although it is natural that we in zs_create_pool()
2488 * have one size_class for each size, there is a chance that we in zs_create_pool()
2496 pool->size_class[i] = prev_class; in zs_create_pool()
2505 class->size = size; in zs_create_pool()
2506 class->index = i; in zs_create_pool()
2507 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2508 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2509 spin_lock_init(&class->lock); in zs_create_pool()
2510 pool->size_class[i] = class; in zs_create_pool()
2513 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2550 struct size_class *class = pool->size_class[i]; in zs_destroy_pool()
2555 if (class->index != i) in zs_destroy_pool()
2559 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2560 pr_info("Freeing non-empty class with size %db, fullness group %d\n", in zs_destroy_pool()
2561 class->size, fg); in zs_destroy_pool()
2568 kfree(pool->name); in zs_destroy_pool()