// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * likewise be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
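
/*
 * Worked example (a sketch, not normative: it assumes PAGE_SIZE == 4096,
 * so PAGE_SHIFT == 12, and a z3fold_header that rounds up to one chunk):
 * CHUNK_SHIFT = 12 - 6 = 6, CHUNK_SIZE = 64 bytes, TOTAL_CHUNKS = 64,
 * ZHDR_CHUNKS = 1 and NCHUNKS = (4096 - 64) >> 6 = 63.
 */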

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the first chunk of the middle buddy, 0 if free
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2
 *		or fewer buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driving this pool, if the pool was created via zpool
 * @zpool_ops:	zpool operations associated with @zpool
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
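
/*
 * For instance (a sketch assuming CHUNK_SIZE == 64, i.e. 4K pages):
 * size_to_chunks(1) == 1, size_to_chunks(64) == 1, size_to_chunks(100) == 2;
 * sizes are always rounded up to a whole number of chunks.
 */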

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
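
/*
 * Note (an illustration, assuming SLOTS_ALIGN == 0x40): because the slots
 * structure is allocated from a cache aligned to 64 bytes, a handle that
 * points at any slot[i] inside it can be converted back to the structure
 * base simply by clearing the low 6 bits, which is exactly what
 * handle_to_slots() above does.
 */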

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
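
/*
 * Layout sketch (illustrative): the value stored in slots->slot[idx] is the
 * page-aligned zhdr address plus the buddy index in the low bits; for a
 * LAST buddy, last_chunks is additionally packed into the in-page offset
 * bits above BUDDY_SHIFT, so that handle_to_chunks() below can recover the
 * object size from the handle alone.
 */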

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
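
/*
 * Worked example (a sketch): with first_num == 1, encoding LAST (3) gives
 * idx = (3 + 1) & BUDDY_MASK = 0, so the stored buddy-index bits are 0;
 * decoding computes (0 - 1) & BUDDY_MASK = 3 == LAST, which is why the
 * wrap-around noted in the comment above is harmless.
 */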

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
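
/*
 * Worked example (a sketch, with TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1):
 * for first_chunks == 0, start_middle == 10, middle_chunks == 20 and
 * last_chunks == 0, nfree_before == 10 - 1 == 9 and
 * nfree_after == 64 - (10 + 20) == 34, so num_free_chunks() returns 34.
 */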

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
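
/*
 * For instance (a sketch, with ZHDR_CHUNKS == 1): first_chunks == 5 and
 * start_middle == 20 leave a gap of 20 - (5 + 1) == 14 chunks, which is
 * >= BIG_CHUNK_GAP, so the middle buddy is moved down to chunk 6 and the
 * free space is merged into one contiguous region after it.
 */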

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_CLAIMED, &page->private) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the allocation cannot fit into any
 * z3fold page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	struct z3fold_buddy_slots slots;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				page = NULL;
				continue;
			}

			if (unlikely(PageIsolated(page))) {
				clear_bit(PAGE_CLAIMED, &page->private);
				page = NULL;
				continue;
			}
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				clear_bit(PAGE_CLAIMED, &page->private);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = __encode_handle(zhdr, &slots, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
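
/*
 * A minimal sketch of the eviction handler contract described above
 * (illustrative only; write_back_somewhere() is a hypothetical helper,
 * and a real user such as zswap writes the object back to a backing
 * store before freeing it):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *		int err = write_back_somewhere(obj);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;		// page goes back on the LRU
 *		z3fold_free(pool, handle);	// mandatory on success
 *		return 0;
 *	}
 */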

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
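
/*
 * For example (a sketch, with PAGE_SIZE == 4096 and CHUNK_SIZE == 64): a
 * LAST buddy whose handle encodes handle_to_chunks() == 10 maps to the
 * page address + 4096 - 10 * 64, i.e. the object occupies the final 640
 * bytes of the page.
 */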

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private) ||
	    test_bit(PAGE_CLAIMED, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr)) {
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
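
/*
 * Since z3fold exports nothing directly, a kernel client reaches it
 * through the zpool layer. A minimal sketch (error handling elided;
 * the calls shown are the generic zpool API, not anything specific to
 * this file):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, NULL);
 *	unsigned long handle;
 *	void *dst;
 *
 *	zpool_malloc(zp, len, GFP_KERNEL, &handle);
 *	dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *	memcpy(dst, src, len);		// store a compressed object
 *	zpool_unmap_handle(zp, handle);
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */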

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");