1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  * Copyright (C) 2022 Alibaba Cloud
6  */
7 #include "compress.h"
8 #include <linux/psi.h>
9 #include <linux/cpuhotplug.h>
10 #include <linux/kthread.h>
11 #include <trace/events/erofs.h>
12 
13 #define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
14 #define Z_EROFS_INLINE_BVECS		2
15 
16 /*
17  * let's leave a type here in case another tagged pointer
18  * is introduced later.
19  */
20 typedef void *z_erofs_next_pcluster_t;
21 
22 struct z_erofs_bvec {
23 	struct page *page;
24 	int offset;
25 	unsigned int end;
26 };
27 
28 #define __Z_EROFS_BVSET(name, total) \
29 struct name { \
30 	/* point to the next page which contains the following bvecs */ \
31 	struct page *nextpage; \
32 	struct z_erofs_bvec bvec[total]; \
33 }
34 __Z_EROFS_BVSET(z_erofs_bvset,);
35 __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
36 
37 /*
38  * Structure fields follow one of the following exclusion rules.
39  *
40  * I: Modifiable by initialization/destruction paths and read-only
41  *    for everyone else;
42  *
43  * L: Field should be protected by the pcluster lock;
44  *
45  * A: Field should be accessed / updated atomically for parallelized code.
46  */
47 struct z_erofs_pcluster {
48 	struct erofs_workgroup obj;
49 	struct mutex lock;
50 
51 	/* A: point to next chained pcluster or TAILs */
52 	z_erofs_next_pcluster_t next;
53 
54 	/* L: the maximum decompression size of this round */
55 	unsigned int length;
56 
57 	/* L: total number of bvecs */
58 	unsigned int vcnt;
59 
60 	/* I: pcluster size (compressed size) in bytes */
61 	unsigned int pclustersize;
62 
63 	/* I: page offset of start position of decompression */
64 	unsigned short pageofs_out;
65 
66 	/* I: page offset of inline compressed data */
67 	unsigned short pageofs_in;
68 
69 	union {
70 		/* L: inline a certain number of bvecs for bootstrap */
71 		struct z_erofs_bvset_inline bvset;
72 
73 		/* I: can be used to free the pcluster by RCU. */
74 		struct rcu_head rcu;
75 	};
76 
77 	/* I: compression algorithm format */
78 	unsigned char algorithmformat;
79 
80 	/* L: whether partial decompression or not */
81 	bool partial;
82 
83 	/* L: indicate several pageofs_outs or not */
84 	bool multibases;
85 
86 	/* A: compressed bvecs (can be cached or inplaced pages) */
87 	struct z_erofs_bvec compressed_bvecs[];
88 };
89 
90 /* let's avoid the valid 32-bit kernel addresses */
91 
92 /* the end of a chain of pclusters */
93 #define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
94 #define Z_EROFS_PCLUSTER_NIL            (NULL)
95 
96 struct z_erofs_decompressqueue {
97 	struct super_block *sb;
98 	atomic_t pending_bios;
99 	z_erofs_next_pcluster_t head;
100 
101 	union {
102 		struct completion done;
103 		struct work_struct work;
104 		struct kthread_work kthread_work;
105 	} u;
106 	bool eio, sync;
107 };
108 
109 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
110 {
111 	return !pcl->obj.index;
112 }
113 
114 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
115 {
116 	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
117 }
118 
119 /*
120  * bit 30: I/O error occurred on this page
121  * bit 0 - 29: remaining parts to complete this page
122  */
123 #define Z_EROFS_PAGE_EIO			(1 << 30)
124 
125 static inline void z_erofs_onlinepage_init(struct page *page)
126 {
127 	union {
128 		atomic_t o;
129 		unsigned long v;
130 	} u = { .o = ATOMIC_INIT(1) };
131 
132 	set_page_private(page, u.v);
133 	smp_wmb();
134 	SetPagePrivate(page);
135 }
136 
137 static inline void z_erofs_onlinepage_split(struct page *page)
138 {
139 	atomic_inc((atomic_t *)&page->private);
140 }
141 
142 static inline void z_erofs_page_mark_eio(struct page *page)
143 {
144 	int orig;
145 
146 	do {
147 		orig = atomic_read((atomic_t *)&page->private);
148 	} while (atomic_cmpxchg((atomic_t *)&page->private, orig,
149 				orig | Z_EROFS_PAGE_EIO) != orig);
150 }
151 
152 static inline void z_erofs_onlinepage_endio(struct page *page)
153 {
154 	unsigned int v;
155 
156 	DBG_BUGON(!PagePrivate(page));
157 	v = atomic_dec_return((atomic_t *)&page->private);
158 	if (!(v & ~Z_EROFS_PAGE_EIO)) {
159 		set_page_private(page, 0);
160 		ClearPagePrivate(page);
161 		if (!(v & Z_EROFS_PAGE_EIO))
162 			SetPageUptodate(page);
163 		unlock_page(page);
164 	}
165 }
166 
167 #define Z_EROFS_ONSTACK_PAGES		32
168 
169 /*
170  * since pclustersize is variable for the big pcluster feature, introduce
171  * slab pools for different pcluster sizes.
172  */
173 struct z_erofs_pcluster_slab {
174 	struct kmem_cache *slab;
175 	unsigned int maxpages;
176 	char name[48];
177 };
178 
179 #define _PCLP(n) { .maxpages = n }
180 
181 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
182 	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
183 	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
184 };
185 
186 struct z_erofs_bvec_iter {
187 	struct page *bvpage;
188 	struct z_erofs_bvset *bvset;
189 	unsigned int nr, cur;
190 };
191 
192 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
193 {
194 	if (iter->bvpage)
195 		kunmap_local(iter->bvset);
196 	return iter->bvpage;
197 }
198 
199 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
200 {
201 	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
202 	/* have to access nextpage in advance, otherwise it will be unmapped */
203 	struct page *nextpage = iter->bvset->nextpage;
204 	struct page *oldpage;
205 
206 	DBG_BUGON(!nextpage);
207 	oldpage = z_erofs_bvec_iter_end(iter);
208 	iter->bvpage = nextpage;
209 	iter->bvset = kmap_local_page(nextpage);
210 	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
211 	iter->cur = 0;
212 	return oldpage;
213 }
214 
215 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
216 				    struct z_erofs_bvset_inline *bvset,
217 				    unsigned int bootstrap_nr,
218 				    unsigned int cur)
219 {
220 	*iter = (struct z_erofs_bvec_iter) {
221 		.nr = bootstrap_nr,
222 		.bvset = (struct z_erofs_bvset *)bvset,
223 	};
224 
225 	while (cur > iter->nr) {
226 		cur -= iter->nr;
227 		z_erofs_bvset_flip(iter);
228 	}
229 	iter->cur = cur;
230 }
231 
232 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
233 				struct z_erofs_bvec *bvec,
234 				struct page **candidate_bvpage,
235 				struct page **pagepool)
236 {
237 	if (iter->cur >= iter->nr) {
238 		struct page *nextpage = *candidate_bvpage;
239 
240 		if (!nextpage) {
241 			nextpage = erofs_allocpage(pagepool, GFP_NOFS);
242 			if (!nextpage)
243 				return -ENOMEM;
244 			set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
245 		}
246 		DBG_BUGON(iter->bvset->nextpage);
247 		iter->bvset->nextpage = nextpage;
248 		z_erofs_bvset_flip(iter);
249 
250 		iter->bvset->nextpage = NULL;
251 		*candidate_bvpage = NULL;
252 	}
253 	iter->bvset->bvec[iter->cur++] = *bvec;
254 	return 0;
255 }
256 
257 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
258 				 struct z_erofs_bvec *bvec,
259 				 struct page **old_bvpage)
260 {
261 	if (iter->cur == iter->nr)
262 		*old_bvpage = z_erofs_bvset_flip(iter);
263 	else
264 		*old_bvpage = NULL;
265 	*bvec = iter->bvset->bvec[iter->cur++];
266 }
267 
268 static void z_erofs_destroy_pcluster_pool(void)
269 {
270 	int i;
271 
272 	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
273 		if (!pcluster_pool[i].slab)
274 			continue;
275 		kmem_cache_destroy(pcluster_pool[i].slab);
276 		pcluster_pool[i].slab = NULL;
277 	}
278 }
279 
280 static int z_erofs_create_pcluster_pool(void)
281 {
282 	struct z_erofs_pcluster_slab *pcs;
283 	struct z_erofs_pcluster *a;
284 	unsigned int size;
285 
286 	for (pcs = pcluster_pool;
287 	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
288 		size = struct_size(a, compressed_bvecs, pcs->maxpages);
289 
290 		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
291 		pcs->slab = kmem_cache_create(pcs->name, size, 0,
292 					      SLAB_RECLAIM_ACCOUNT, NULL);
293 		if (pcs->slab)
294 			continue;
295 
296 		z_erofs_destroy_pcluster_pool();
297 		return -ENOMEM;
298 	}
299 	return 0;
300 }
301 
302 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
303 {
304 	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
305 	struct z_erofs_pcluster_slab *pcs = pcluster_pool;
306 
307 	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
308 		struct z_erofs_pcluster *pcl;
309 
310 		if (nrpages > pcs->maxpages)
311 			continue;
312 
313 		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
314 		if (!pcl)
315 			return ERR_PTR(-ENOMEM);
316 		pcl->pclustersize = size;
317 		return pcl;
318 	}
319 	return ERR_PTR(-EINVAL);
320 }
321 
322 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
323 {
324 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
325 	int i;
326 
327 	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
328 		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
329 
330 		if (pclusterpages > pcs->maxpages)
331 			continue;
332 
333 		kmem_cache_free(pcs->slab, pcl);
334 		return;
335 	}
336 	DBG_BUGON(1);
337 }
338 
339 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
340 
341 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
342 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
343 
344 static void erofs_destroy_percpu_workers(void)
345 {
346 	struct kthread_worker *worker;
347 	unsigned int cpu;
348 
349 	for_each_possible_cpu(cpu) {
350 		worker = rcu_dereference_protected(
351 					z_erofs_pcpu_workers[cpu], 1);
352 		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
353 		if (worker)
354 			kthread_destroy_worker(worker);
355 	}
356 	kfree(z_erofs_pcpu_workers);
357 }
358 
359 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
360 {
361 	struct kthread_worker *worker =
362 		kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
363 
364 	if (IS_ERR(worker))
365 		return worker;
366 	if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
367 		sched_set_fifo_low(worker->task);
368 	else
369 		sched_set_normal(worker->task, 0);
370 	return worker;
371 }
372 
373 static int erofs_init_percpu_workers(void)
374 {
375 	struct kthread_worker *worker;
376 	unsigned int cpu;
377 
378 	z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
379 			sizeof(struct kthread_worker *), GFP_ATOMIC);
380 	if (!z_erofs_pcpu_workers)
381 		return -ENOMEM;
382 
383 	for_each_online_cpu(cpu) {	/* could miss cpu{off,on}line? */
384 		worker = erofs_init_percpu_worker(cpu);
385 		if (!IS_ERR(worker))
386 			rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
387 	}
388 	return 0;
389 }
390 #else
391 static inline void erofs_destroy_percpu_workers(void) {}
392 static inline int erofs_init_percpu_workers(void) { return 0; }
393 #endif
394 
395 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
396 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
397 static enum cpuhp_state erofs_cpuhp_state;
398 
399 static int erofs_cpu_online(unsigned int cpu)
400 {
401 	struct kthread_worker *worker, *old;
402 
403 	worker = erofs_init_percpu_worker(cpu);
404 	if (IS_ERR(worker))
405 		return PTR_ERR(worker);
406 
407 	spin_lock(&z_erofs_pcpu_worker_lock);
408 	old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
409 			lockdep_is_held(&z_erofs_pcpu_worker_lock));
410 	if (!old)
411 		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
412 	spin_unlock(&z_erofs_pcpu_worker_lock);
413 	if (old)
414 		kthread_destroy_worker(worker);
415 	return 0;
416 }
417 
418 static int erofs_cpu_offline(unsigned int cpu)
419 {
420 	struct kthread_worker *worker;
421 
422 	spin_lock(&z_erofs_pcpu_worker_lock);
423 	worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
424 			lockdep_is_held(&z_erofs_pcpu_worker_lock));
425 	rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
426 	spin_unlock(&z_erofs_pcpu_worker_lock);
427 
428 	synchronize_rcu();
429 	if (worker)
430 		kthread_destroy_worker(worker);
431 	return 0;
432 }
433 
434 static int erofs_cpu_hotplug_init(void)
435 {
436 	int state;
437 
438 	state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
439 			"fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
440 	if (state < 0)
441 		return state;
442 
443 	erofs_cpuhp_state = state;
444 	return 0;
445 }
446 
447 static void erofs_cpu_hotplug_destroy(void)
448 {
449 	if (erofs_cpuhp_state)
450 		cpuhp_remove_state_nocalls(erofs_cpuhp_state);
451 }
452 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
453 static inline int erofs_cpu_hotplug_init(void) { return 0; }
454 static inline void erofs_cpu_hotplug_destroy(void) {}
455 #endif
456 
457 void z_erofs_exit_zip_subsystem(void)
458 {
459 	erofs_cpu_hotplug_destroy();
460 	erofs_destroy_percpu_workers();
461 	destroy_workqueue(z_erofs_workqueue);
462 	z_erofs_destroy_pcluster_pool();
463 }
464 
465 int __init z_erofs_init_zip_subsystem(void)
466 {
467 	int err = z_erofs_create_pcluster_pool();
468 
469 	if (err)
470 		goto out_error_pcluster_pool;
471 
472 	z_erofs_workqueue = alloc_workqueue("erofs_worker",
473 			WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
474 	if (!z_erofs_workqueue) {
475 		err = -ENOMEM;
476 		goto out_error_workqueue_init;
477 	}
478 
479 	err = erofs_init_percpu_workers();
480 	if (err)
481 		goto out_error_pcpu_worker;
482 
483 	err = erofs_cpu_hotplug_init();
484 	if (err < 0)
485 		goto out_error_cpuhp_init;
486 	return err;
487 
488 out_error_cpuhp_init:
489 	erofs_destroy_percpu_workers();
490 out_error_pcpu_worker:
491 	destroy_workqueue(z_erofs_workqueue);
492 out_error_workqueue_init:
493 	z_erofs_destroy_pcluster_pool();
494 out_error_pcluster_pool:
495 	return err;
496 }
497 
498 enum z_erofs_pclustermode {
499 	Z_EROFS_PCLUSTER_INFLIGHT,
500 	/*
501 	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
502 	 * could be dispatched into bypass queue later due to uptodated managed
503 	 * pages. All related online pages cannot be reused for inplace I/O (or
504 	 * bvpage) since it can be directly decoded without I/O submission.
505 	 */
506 	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
507 	/*
508 	 * The pcluster was just linked to a decompression chain by us.  It can
509 	 * also be linked with the remaining pclusters, which means if the
510 	 * processing page is the tail page of a pcluster, this pcluster can
511 	 * safely use the whole page (since the previous pcluster is within the
512 	 * same chain) for in-place I/O, as illustrated below:
513 	 *  ___________________________________________________
514 	 * |  tail (partial) page  |    head (partial) page    |
515 	 * |  (of the current pcl) |   (of the previous pcl)   |
516 	 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
517 	 *
518 	 * [  (*) the page above can be used as inplace I/O.   ]
519 	 */
520 	Z_EROFS_PCLUSTER_FOLLOWED,
521 };
522 
523 struct z_erofs_decompress_frontend {
524 	struct inode *const inode;
525 	struct erofs_map_blocks map;
526 	struct z_erofs_bvec_iter biter;
527 
528 	struct page *pagepool;
529 	struct page *candidate_bvpage;
530 	struct z_erofs_pcluster *pcl;
531 	z_erofs_next_pcluster_t owned_head;
532 	enum z_erofs_pclustermode mode;
533 
534 	/* used for applying cache strategy on the fly */
535 	bool backmost;
536 	erofs_off_t headoffset;
537 
538 	/* a pointer used to pick up inplace I/O pages */
539 	unsigned int icur;
540 };
541 
542 #define DECOMPRESS_FRONTEND_INIT(__i) { \
543 	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
544 	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
545 
546 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
547 {
548 	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
549 
550 	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
551 		return false;
552 
553 	if (fe->backmost)
554 		return true;
555 
556 	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
557 	    fe->map.m_la < fe->headoffset)
558 		return true;
559 
560 	return false;
561 }
562 
563 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
564 {
565 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
566 	struct z_erofs_pcluster *pcl = fe->pcl;
567 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
568 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
569 	bool standalone = true;
570 	/*
571 	 * optimistic allocation without direct reclaim since inplace I/O
572 	 * can be used instead under low memory.
573 	 */
574 	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
575 			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
576 	unsigned int i;
577 
578 	if (i_blocksize(fe->inode) != PAGE_SIZE ||
579 	    fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
580 		return;
581 
582 	for (i = 0; i < pclusterpages; ++i) {
583 		struct page *page, *newpage;
584 		void *t;	/* mark pages just found for debugging */
585 
586 		/* Inaccurate check w/o locking to avoid unneeded lookups */
587 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
588 			continue;
589 
590 		page = find_get_page(mc, pcl->obj.index + i);
591 		if (page) {
592 			t = (void *)((unsigned long)page | 1);
593 			newpage = NULL;
594 		} else {
595 			/* I/O is needed, not possible to decompress directly */
596 			standalone = false;
597 			if (!shouldalloc)
598 				continue;
599 
600 			/*
601 			 * Try cached I/O if allocation succeeds or fallback to
602 			 * in-place I/O instead to avoid any direct reclaim.
603 			 */
604 			newpage = erofs_allocpage(&fe->pagepool, gfp);
605 			if (!newpage)
606 				continue;
607 			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
608 			t = (void *)((unsigned long)newpage | 1);
609 		}
610 		spin_lock(&pcl->obj.lock);
611 		if (!pcl->compressed_bvecs[i].page) {
612 			pcl->compressed_bvecs[i].page = t;
613 			spin_unlock(&pcl->obj.lock);
614 			continue;
615 		}
616 		spin_unlock(&pcl->obj.lock);
617 
618 		if (page)
619 			put_page(page);
620 		else if (newpage)
621 			erofs_pagepool_add(&fe->pagepool, newpage);
622 	}
623 
624 	/*
625 	 * don't do inplace I/O if all compressed pages are available in
626 	 * managed cache since it can be moved to the bypass queue instead.
627 	 */
628 	if (standalone)
629 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
630 }
631 
632 /* called by erofs_shrinker to get rid of all compressed_pages */
633 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
634 				       struct erofs_workgroup *grp)
635 {
636 	struct z_erofs_pcluster *const pcl =
637 		container_of(grp, struct z_erofs_pcluster, obj);
638 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
639 	int i;
640 
641 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
642 	/*
643  * refcount of workgroup is now frozen at 1,
644 	 * therefore no need to worry about available decompression users.
645 	 */
646 	for (i = 0; i < pclusterpages; ++i) {
647 		struct page *page = pcl->compressed_bvecs[i].page;
648 
649 		if (!page)
650 			continue;
651 
652 		/* block other users from reclaiming or migrating the page */
653 		if (!trylock_page(page))
654 			return -EBUSY;
655 
656 		if (!erofs_page_is_managed(sbi, page))
657 			continue;
658 
659 		/* barrier is implied in the following 'unlock_page' */
660 		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
661 		detach_page_private(page);
662 		unlock_page(page);
663 	}
664 	return 0;
665 }
666 
667 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
668 {
669 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
670 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
671 	bool ret;
672 	int i;
673 
674 	if (!folio_test_private(folio))
675 		return true;
676 
677 	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
678 		return false;
679 
680 	ret = false;
681 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
682 	for (i = 0; i < pclusterpages; ++i) {
683 		if (pcl->compressed_bvecs[i].page == &folio->page) {
684 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
685 			ret = true;
686 			break;
687 		}
688 	}
689 	erofs_workgroup_unfreeze(&pcl->obj, 1);
690 
691 	if (ret)
692 		folio_detach_private(folio);
693 	return ret;
694 }
695 
696 /*
697  * It will be called only on inode eviction. If there are still some
698  * decompression requests in progress, wait with rescheduling for a bit here.
699  * An extra lock could be introduced instead but it seems unnecessary.
700  */
701 static void z_erofs_cache_invalidate_folio(struct folio *folio,
702 					   size_t offset, size_t length)
703 {
704 	const size_t stop = length + offset;
705 
706 	/* Check for potential overflow in debug mode */
707 	DBG_BUGON(stop > folio_size(folio) || stop < length);
708 
709 	if (offset == 0 && stop == folio_size(folio))
710 		while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
711 			cond_resched();
712 }
713 
714 static const struct address_space_operations z_erofs_cache_aops = {
715 	.release_folio = z_erofs_cache_release_folio,
716 	.invalidate_folio = z_erofs_cache_invalidate_folio,
717 };
718 
719 int erofs_init_managed_cache(struct super_block *sb)
720 {
721 	struct inode *const inode = new_inode(sb);
722 
723 	if (!inode)
724 		return -ENOMEM;
725 
726 	set_nlink(inode, 1);
727 	inode->i_size = OFFSET_MAX;
728 	inode->i_mapping->a_ops = &z_erofs_cache_aops;
729 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
730 	EROFS_SB(sb)->managed_cache = inode;
731 	return 0;
732 }
733 
734 /* callers must be with pcluster lock held */
735 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
736 			       struct z_erofs_bvec *bvec, bool exclusive)
737 {
738 	struct z_erofs_pcluster *pcl = fe->pcl;
739 	int ret;
740 
741 	if (exclusive) {
742 		/* give priority for inplace I/O to use file pages first */
743 		spin_lock(&pcl->obj.lock);
744 		while (fe->icur > 0) {
745 			if (pcl->compressed_bvecs[--fe->icur].page)
746 				continue;
747 			pcl->compressed_bvecs[fe->icur] = *bvec;
748 			spin_unlock(&pcl->obj.lock);
749 			return 0;
750 		}
751 		spin_unlock(&pcl->obj.lock);
752 
753 		/* otherwise, check if it can be used as a bvpage */
754 		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
755 		    !fe->candidate_bvpage)
756 			fe->candidate_bvpage = bvec->page;
757 	}
758 	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
759 				   &fe->pagepool);
760 	fe->pcl->vcnt += (ret >= 0);
761 	return ret;
762 }
763 
764 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
765 {
766 	struct z_erofs_pcluster *pcl = f->pcl;
767 	z_erofs_next_pcluster_t *owned_head = &f->owned_head;
768 
769 	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
770 	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
771 		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
772 		*owned_head = &pcl->next;
773 		/* so we can attach this pcluster to our submission chain. */
774 		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
775 		return;
776 	}
777 
778 	/* type 2, it belongs to an ongoing chain */
779 	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
780 }
781 
782 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
783 {
784 	struct erofs_map_blocks *map = &fe->map;
785 	struct super_block *sb = fe->inode->i_sb;
786 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
787 	struct z_erofs_pcluster *pcl;
788 	struct erofs_workgroup *grp;
789 	int err;
790 
791 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
792 	    (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
793 		DBG_BUGON(1);
794 		return -EFSCORRUPTED;
795 	}
796 
797 	/* no available pcluster, let's allocate one */
798 	pcl = z_erofs_alloc_pcluster(map->m_plen);
799 	if (IS_ERR(pcl))
800 		return PTR_ERR(pcl);
801 
802 	spin_lock_init(&pcl->obj.lock);
803 	atomic_set(&pcl->obj.refcount, 1);
804 	pcl->algorithmformat = map->m_algorithmformat;
805 	pcl->length = 0;
806 	pcl->partial = true;
807 
808 	/* new pclusters should be claimed as type 1, primary and followed */
809 	pcl->next = fe->owned_head;
810 	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
811 	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
812 
813 	/*
814 	 * lock all primary followed pclusters before they are visible to others
815 	 * and mutex_trylock *never* fails for a new pcluster.
816 	 */
817 	mutex_init(&pcl->lock);
818 	DBG_BUGON(!mutex_trylock(&pcl->lock));
819 
820 	if (ztailpacking) {
821 		pcl->obj.index = 0;	/* which indicates ztailpacking */
822 	} else {
823 		pcl->obj.index = erofs_blknr(sb, map->m_pa);
824 
825 		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
826 		if (IS_ERR(grp)) {
827 			err = PTR_ERR(grp);
828 			goto err_out;
829 		}
830 
831 		if (grp != &pcl->obj) {
832 			fe->pcl = container_of(grp,
833 					struct z_erofs_pcluster, obj);
834 			err = -EEXIST;
835 			goto err_out;
836 		}
837 	}
838 	fe->owned_head = &pcl->next;
839 	fe->pcl = pcl;
840 	return 0;
841 
842 err_out:
843 	mutex_unlock(&pcl->lock);
844 	z_erofs_free_pcluster(pcl);
845 	return err;
846 }
847 
848 static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
849 {
850 	struct erofs_map_blocks *map = &fe->map;
851 	struct super_block *sb = fe->inode->i_sb;
852 	erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
853 	struct erofs_workgroup *grp = NULL;
854 	int ret;
855 
856 	DBG_BUGON(fe->pcl);
857 
858 	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
859 	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
860 
861 	if (!(map->m_flags & EROFS_MAP_META)) {
862 		grp = erofs_find_workgroup(sb, blknr);
863 	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
864 		DBG_BUGON(1);
865 		return -EFSCORRUPTED;
866 	}
867 
868 	if (grp) {
869 		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
870 		ret = -EEXIST;
871 	} else {
872 		ret = z_erofs_register_pcluster(fe);
873 	}
874 
875 	if (ret == -EEXIST) {
876 		mutex_lock(&fe->pcl->lock);
877 		z_erofs_try_to_claim_pcluster(fe);
878 	} else if (ret) {
879 		return ret;
880 	}
881 
882 	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
883 				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
884 	if (!z_erofs_is_inline_pcluster(fe->pcl)) {
885 		/* bind cache first when cached decompression is preferred */
886 		z_erofs_bind_cache(fe);
887 	} else {
888 		void *mptr;
889 
890 		mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
891 		if (IS_ERR(mptr)) {
892 			ret = PTR_ERR(mptr);
893 			erofs_err(sb, "failed to get inline data %d", ret);
894 			return ret;
895 		}
896 		get_page(map->buf.page);
897 		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
898 		fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
899 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
900 	}
901 	/* file-backed inplace I/O pages are traversed in reverse order */
902 	fe->icur = z_erofs_pclusterpages(fe->pcl);
903 	return 0;
904 }
905 
906 /*
907  * keep in mind that pclusters are freed only after an RCU grace
908  * period, so referenced pclusters cannot go away under RCU readers.
909  */
910 static void z_erofs_rcu_callback(struct rcu_head *head)
911 {
912 	z_erofs_free_pcluster(container_of(head,
913 			struct z_erofs_pcluster, rcu));
914 }
915 
916 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
917 {
918 	struct z_erofs_pcluster *const pcl =
919 		container_of(grp, struct z_erofs_pcluster, obj);
920 
921 	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
922 }
923 
924 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
925 {
926 	struct z_erofs_pcluster *pcl = fe->pcl;
927 
928 	if (!pcl)
929 		return;
930 
931 	z_erofs_bvec_iter_end(&fe->biter);
932 	mutex_unlock(&pcl->lock);
933 
934 	if (fe->candidate_bvpage)
935 		fe->candidate_bvpage = NULL;
936 
937 	/*
938 	 * if all pending pages are added, don't hold its reference
939 	 * any longer if the pcluster isn't hosted by ourselves.
940 	 */
941 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
942 		erofs_workgroup_put(&pcl->obj);
943 
944 	fe->pcl = NULL;
945 	fe->backmost = false;
946 }
947 
948 static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
949 			unsigned int cur, unsigned int end, erofs_off_t pos)
950 {
951 	struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
952 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
953 	unsigned int cnt;
954 	u8 *src;
955 
956 	if (!packed_inode)
957 		return -EFSCORRUPTED;
958 
959 	for (; cur < end; cur += cnt, pos += cnt) {
960 		cnt = min_t(unsigned int, end - cur,
961 			    sb->s_blocksize - erofs_blkoff(sb, pos));
962 		src = erofs_bread(&buf, packed_inode,
963 				  erofs_blknr(sb, pos), EROFS_KMAP);
964 		if (IS_ERR(src)) {
965 			erofs_put_metabuf(&buf);
966 			return PTR_ERR(src);
967 		}
968 		memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
969 	}
970 	erofs_put_metabuf(&buf);
971 	return 0;
972 }
973 
974 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
975 				struct page *page)
976 {
977 	struct inode *const inode = fe->inode;
978 	struct erofs_map_blocks *const map = &fe->map;
979 	const loff_t offset = page_offset(page);
980 	const unsigned int bs = i_blocksize(inode);
981 	bool tight = true, exclusive;
982 	unsigned int cur, end, len, split;
983 	int err = 0;
984 
985 	z_erofs_onlinepage_init(page);
986 	split = 0;
987 	end = PAGE_SIZE;
988 repeat:
989 	if (offset + end - 1 < map->m_la ||
990 	    offset + end - 1 >= map->m_la + map->m_llen) {
991 		erofs_dbg("out-of-range map @ pos %llu", offset + end - 1);
992 		z_erofs_pcluster_end(fe);
993 		map->m_la = offset + end - 1;
994 		map->m_llen = 0;
995 		err = z_erofs_map_blocks_iter(inode, map, 0);
996 		if (err)
997 			goto out;
998 	}
999 
1000 	cur = offset > map->m_la ? 0 : map->m_la - offset;
1001 	/* bump split parts first to avoid several separate cases */
1002 	++split;
1003 
1004 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
1005 		zero_user_segment(page, cur, end);
1006 		tight = false;
1007 		goto next_part;
1008 	}
1009 
1010 	if (map->m_flags & EROFS_MAP_FRAGMENT) {
1011 		erofs_off_t fpos = offset + cur - map->m_la;
1012 
1013 		len = min_t(unsigned int, map->m_llen - fpos, end - cur);
1014 		err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
1015 				EROFS_I(inode)->z_fragmentoff + fpos);
1016 		if (err)
1017 			goto out;
1018 		tight = false;
1019 		goto next_part;
1020 	}
1021 
1022 	if (!fe->pcl) {
1023 		err = z_erofs_pcluster_begin(fe);
1024 		if (err)
1025 			goto out;
1026 	}
1027 
1028 	/*
1029 	 * Ensure the current partial page belongs to this submit chain rather
1030 	 * than other concurrent submit chains or the noio(bypass) chain since
1031 	 * those chains are handled asynchronously thus the page cannot be used
1032 	 * for inplace I/O or bvpage (should be processed in a strict order.)
1033 	 */
1034 	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
1035 	exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE)));
1036 	if (cur)
1037 		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
1038 
1039 	err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
1040 					.page = page,
1041 					.offset = offset - map->m_la,
1042 					.end = end,
1043 				  }), exclusive);
1044 	if (err)
1045 		goto out;
1046 
1047 	z_erofs_onlinepage_split(page);
1048 	if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
1049 		fe->pcl->multibases = true;
1050 	if (fe->pcl->length < offset + end - map->m_la) {
1051 		fe->pcl->length = offset + end - map->m_la;
1052 		fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
1053 	}
1054 	if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
1055 	    !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
1056 	    fe->pcl->length == map->m_llen)
1057 		fe->pcl->partial = false;
1058 next_part:
1059 	/* shorten the remaining extent to update progress */
1060 	map->m_llen = offset + cur - map->m_la;
1061 	map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
1062 
1063 	end = cur;
1064 	if (end > 0)
1065 		goto repeat;
1066 
1067 out:
1068 	if (err)
1069 		z_erofs_page_mark_eio(page);
1070 	z_erofs_onlinepage_endio(page);
1071 
1072 	erofs_dbg("%s, finish page: %pK split: %u map->m_llen %llu",
1073 		  __func__, page, split, map->m_llen);
1074 	return err;
1075 }
1076 
1077 static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
1078 				       unsigned int readahead_pages)
1079 {
1080 	/* auto: enable for read_folio, disable for readahead */
1081 	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
1082 	    !readahead_pages)
1083 		return true;
1084 
1085 	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
1086 	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
1087 		return true;
1088 
1089 	return false;
1090 }
1091 
1092 static bool z_erofs_page_is_invalidated(struct page *page)
1093 {
1094 	return !page->mapping && !z_erofs_is_shortlived_page(page);
1095 }
1096 
1097 struct z_erofs_decompress_backend {
1098 	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1099 	struct super_block *sb;
1100 	struct z_erofs_pcluster *pcl;
1101 
1102 	/* pages with the longest decompressed length for deduplication */
1103 	struct page **decompressed_pages;
1104 	/* pages to keep the compressed data */
1105 	struct page **compressed_pages;
1106 
1107 	struct list_head decompressed_secondary_bvecs;
1108 	struct page **pagepool;
1109 	unsigned int onstack_used, nr_pages;
1110 };
1111 
1112 struct z_erofs_bvec_item {
1113 	struct z_erofs_bvec bvec;
1114 	struct list_head list;
1115 };
1116 
1117 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
1118 					 struct z_erofs_bvec *bvec)
1119 {
1120 	struct z_erofs_bvec_item *item;
1121 	unsigned int pgnr;
1122 
1123 	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
1124 	    (bvec->end == PAGE_SIZE ||
1125 	     bvec->offset + bvec->end == be->pcl->length)) {
1126 		pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
1127 		DBG_BUGON(pgnr >= be->nr_pages);
1128 		if (!be->decompressed_pages[pgnr]) {
1129 			be->decompressed_pages[pgnr] = bvec->page;
1130 			return;
1131 		}
1132 	}
1133 
1134 	/* (cold path) one pcluster is requested multiple times */
1135 	item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
1136 	item->bvec = *bvec;
1137 	list_add(&item->list, &be->decompressed_secondary_bvecs);
1138 }
1139 
1140 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
1141 				      int err)
1142 {
1143 	unsigned int off0 = be->pcl->pageofs_out;
1144 	struct list_head *p, *n;
1145 
1146 	list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
1147 		struct z_erofs_bvec_item *bvi;
1148 		unsigned int end, cur;
1149 		void *dst, *src;
1150 
1151 		bvi = container_of(p, struct z_erofs_bvec_item, list);
1152 		cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
1153 		end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
1154 			    bvi->bvec.end);
1155 		dst = kmap_local_page(bvi->bvec.page);
1156 		while (cur < end) {
1157 			unsigned int pgnr, scur, len;
1158 
1159 			pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
1160 			DBG_BUGON(pgnr >= be->nr_pages);
1161 
1162 			scur = bvi->bvec.offset + cur -
1163 					((pgnr << PAGE_SHIFT) - off0);
1164 			len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
1165 			if (!be->decompressed_pages[pgnr]) {
1166 				err = -EFSCORRUPTED;
1167 				cur += len;
1168 				continue;
1169 			}
1170 			src = kmap_local_page(be->decompressed_pages[pgnr]);
1171 			memcpy(dst + cur, src + scur, len);
1172 			kunmap_local(src);
1173 			cur += len;
1174 		}
1175 		kunmap_local(dst);
1176 		if (err)
1177 			z_erofs_page_mark_eio(bvi->bvec.page);
1178 		z_erofs_onlinepage_endio(bvi->bvec.page);
1179 		list_del(p);
1180 		kfree(bvi);
1181 	}
1182 }
1183 
1184 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
1185 {
1186 	struct z_erofs_pcluster *pcl = be->pcl;
1187 	struct z_erofs_bvec_iter biter;
1188 	struct page *old_bvpage;
1189 	int i;
1190 
1191 	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
1192 	for (i = 0; i < pcl->vcnt; ++i) {
1193 		struct z_erofs_bvec bvec;
1194 
1195 		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
1196 
1197 		if (old_bvpage)
1198 			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1199 
1200 		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1201 		z_erofs_do_decompressed_bvec(be, &bvec);
1202 	}
1203 
1204 	old_bvpage = z_erofs_bvec_iter_end(&biter);
1205 	if (old_bvpage)
1206 		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1207 }
1208 
1209 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1210 				  bool *overlapped)
1211 {
1212 	struct z_erofs_pcluster *pcl = be->pcl;
1213 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1214 	int i, err = 0;
1215 
1216 	*overlapped = false;
1217 	for (i = 0; i < pclusterpages; ++i) {
1218 		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
1219 		struct page *page = bvec->page;
1220 
1221 		/* compressed pages ought to be present before decompressing */
1222 		if (!page) {
1223 			DBG_BUGON(1);
1224 			continue;
1225 		}
1226 		be->compressed_pages[i] = page;
1227 
1228 		if (z_erofs_is_inline_pcluster(pcl)) {
1229 			if (!PageUptodate(page))
1230 				err = -EIO;
1231 			continue;
1232 		}
1233 
1234 		DBG_BUGON(z_erofs_page_is_invalidated(page));
1235 		if (!z_erofs_is_shortlived_page(page)) {
1236 			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
1237 				if (!PageUptodate(page))
1238 					err = -EIO;
1239 				continue;
1240 			}
1241 			z_erofs_do_decompressed_bvec(be, bvec);
1242 			*overlapped = true;
1243 		}
1244 	}
1245 
1246 	if (err)
1247 		return err;
1248 	return 0;
1249 }
1250 
1251 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1252 				       int err)
1253 {
1254 	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1255 	struct z_erofs_pcluster *pcl = be->pcl;
1256 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1257 	const struct z_erofs_decompressor *decompressor =
1258 				&erofs_decompressors[pcl->algorithmformat];
1259 	int i, err2;
1260 	struct page *page;
1261 	bool overlapped;
1262 
1263 	mutex_lock(&pcl->lock);
1264 	be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1265 
1266 	/* allocate (de)compressed page arrays if cannot be kept on stack */
1267 	be->decompressed_pages = NULL;
1268 	be->compressed_pages = NULL;
1269 	be->onstack_used = 0;
1270 	if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1271 		be->decompressed_pages = be->onstack_pages;
1272 		be->onstack_used = be->nr_pages;
1273 		memset(be->decompressed_pages, 0,
1274 		       sizeof(struct page *) * be->nr_pages);
1275 	}
1276 
1277 	if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1278 		be->compressed_pages = be->onstack_pages + be->onstack_used;
1279 
1280 	if (!be->decompressed_pages)
1281 		be->decompressed_pages =
1282 			kvcalloc(be->nr_pages, sizeof(struct page *),
1283 				 GFP_KERNEL | __GFP_NOFAIL);
1284 	if (!be->compressed_pages)
1285 		be->compressed_pages =
1286 			kvcalloc(pclusterpages, sizeof(struct page *),
1287 				 GFP_KERNEL | __GFP_NOFAIL);
1288 
1289 	z_erofs_parse_out_bvecs(be);
1290 	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1291 	if (err2)
1292 		err = err2;
1293 	if (err)
1294 		goto out;
1295 
1296 	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
1297 					.sb = be->sb,
1298 					.in = be->compressed_pages,
1299 					.out = be->decompressed_pages,
1300 					.pageofs_in = pcl->pageofs_in,
1301 					.pageofs_out = pcl->pageofs_out,
1302 					.inputsize = pcl->pclustersize,
1303 					.outputsize = pcl->length,
1304 					.alg = pcl->algorithmformat,
1305 					.inplace_io = overlapped,
1306 					.partial_decoding = pcl->partial,
1307 					.fillgaps = pcl->multibases,
1308 				 }, be->pagepool);
1309 
1310 out:
1311 	/* must handle all compressed pages before actual file pages */
1312 	if (z_erofs_is_inline_pcluster(pcl)) {
1313 		page = pcl->compressed_bvecs[0].page;
1314 		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1315 		put_page(page);
1316 	} else {
1317 		for (i = 0; i < pclusterpages; ++i) {
1318 			/* consider shortlived pages added when decompressing */
1319 			page = be->compressed_pages[i];
1320 
1321 			if (erofs_page_is_managed(sbi, page))
1322 				continue;
1323 			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
1324 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1325 		}
1326 	}
1327 	if (be->compressed_pages < be->onstack_pages ||
1328 	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1329 		kvfree(be->compressed_pages);
1330 	z_erofs_fill_other_copies(be, err);
1331 
1332 	for (i = 0; i < be->nr_pages; ++i) {
1333 		page = be->decompressed_pages[i];
1334 		if (!page)
1335 			continue;
1336 
1337 		DBG_BUGON(z_erofs_page_is_invalidated(page));
1338 
1339 		/* recycle all individual short-lived pages */
1340 		if (z_erofs_put_shortlivedpage(be->pagepool, page))
1341 			continue;
1342 		if (err)
1343 			z_erofs_page_mark_eio(page);
1344 		z_erofs_onlinepage_endio(page);
1345 	}
1346 
1347 	if (be->decompressed_pages != be->onstack_pages)
1348 		kvfree(be->decompressed_pages);
1349 
1350 	pcl->length = 0;
1351 	pcl->partial = true;
1352 	pcl->multibases = false;
1353 	pcl->bvset.nextpage = NULL;
1354 	pcl->vcnt = 0;
1355 
1356 	/* pcluster lock MUST be taken before the following line */
1357 	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1358 	mutex_unlock(&pcl->lock);
1359 	return err;
1360 }
1361 
1362 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1363 				     struct page **pagepool)
1364 {
1365 	struct z_erofs_decompress_backend be = {
1366 		.sb = io->sb,
1367 		.pagepool = pagepool,
1368 		.decompressed_secondary_bvecs =
1369 			LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1370 	};
1371 	z_erofs_next_pcluster_t owned = io->head;
1372 	while (owned != Z_EROFS_PCLUSTER_TAIL) {
1373 		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1374 
1375 		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1376 		owned = READ_ONCE(be.pcl->next);
1377 
1378 		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
1379 		erofs_workgroup_put(&be.pcl->obj);
1380 	}
1381 }
1382 
1383 static void z_erofs_decompressqueue_work(struct work_struct *work)
1384 {
1385 	struct z_erofs_decompressqueue *bgq =
1386 		container_of(work, struct z_erofs_decompressqueue, u.work);
1387 	struct page *pagepool = NULL;
1388 
1389 	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
1390 	z_erofs_decompress_queue(bgq, &pagepool);
1391 	erofs_release_pages(&pagepool);
1392 	kvfree(bgq);
1393 }
1394 
1395 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1396 static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
1397 {
1398 	z_erofs_decompressqueue_work((struct work_struct *)work);
1399 }
1400 #endif
1401 
1402 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1403 				       int bios)
1404 {
1405 	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1406 
1407 	/* wake up the caller thread for sync decompression */
1408 	if (io->sync) {
1409 		if (!atomic_add_return(bios, &io->pending_bios))
1410 			complete(&io->u.done);
1411 		return;
1412 	}
1413 
1414 	if (atomic_add_return(bios, &io->pending_bios))
1415 		return;
1416 	/* Use (kthread_)work and sync decompression for atomic contexts only */
1417 	if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
1418 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1419 		struct kthread_worker *worker;
1420 
1421 		rcu_read_lock();
1422 		worker = rcu_dereference(
1423 				z_erofs_pcpu_workers[raw_smp_processor_id()]);
1424 		if (!worker) {
1425 			INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
1426 			queue_work(z_erofs_workqueue, &io->u.work);
1427 		} else {
1428 			kthread_queue_work(worker, &io->u.kthread_work);
1429 		}
1430 		rcu_read_unlock();
1431 #else
1432 		queue_work(z_erofs_workqueue, &io->u.work);
1433 #endif
1434 		/* enable sync decompression for readahead */
1435 		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1436 			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1437 		return;
1438 	}
1439 	z_erofs_decompressqueue_work(&io->u.work);
1440 }
1441 
1442 static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1443 				 struct z_erofs_decompress_frontend *f,
1444 				 struct z_erofs_pcluster *pcl,
1445 				 unsigned int nr,
1446 				 struct address_space *mc)
1447 {
1448 	gfp_t gfp = mapping_gfp_mask(mc);
1449 	bool tocache = false;
1450 	struct z_erofs_bvec zbv;
1451 	struct address_space *mapping;
1452 	struct page *page;
1453 	int justfound, bs = i_blocksize(f->inode);
1454 
1455 	/* Except for inplace pages, the entire page can be used for I/Os */
1456 	bvec->bv_offset = 0;
1457 	bvec->bv_len = PAGE_SIZE;
1458 repeat:
1459 	spin_lock(&pcl->obj.lock);
1460 	zbv = pcl->compressed_bvecs[nr];
1461 	page = zbv.page;
1462 	justfound = (unsigned long)page & 1UL;
1463 	page = (struct page *)((unsigned long)page & ~1UL);
1464 	pcl->compressed_bvecs[nr].page = page;
1465 	spin_unlock(&pcl->obj.lock);
1466 	if (!page)
1467 		goto out_allocpage;
1468 
1469 	bvec->bv_page = page;
1470 	DBG_BUGON(z_erofs_is_shortlived_page(page));
1471 	/*
1472 	 * Handle preallocated cached pages.  We tried to allocate such pages
1473 	 * without triggering direct reclaim.  If allocation failed, inplace
1474 	 * file-backed pages will be used instead.
1475 	 */
1476 	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
1477 		set_page_private(page, 0);
1478 		tocache = true;
1479 		goto out_tocache;
1480 	}
1481 
1482 	mapping = READ_ONCE(page->mapping);
1483 	/*
1484 	 * File-backed pages for inplace I/Os are all locked steady,
1485 	 * therefore it is impossible for `mapping` to be NULL.
1486 	 */
1487 	if (mapping && mapping != mc) {
1488 		if (zbv.offset < 0)
1489 			bvec->bv_offset = round_up(-zbv.offset, bs);
1490 		bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
1491 		return;
1492 	}
1493 
1494 	lock_page(page);
1495 	/* only true if page reclaim goes wrong, should never happen */
1496 	DBG_BUGON(justfound && PagePrivate(page));
1497 
1498 	/* the cached page is still in managed cache */
1499 	if (page->mapping == mc) {
1500 		/*
1501 		 * The cached page is still available but without a valid
1502 		 * `->private` pcluster hint.  Let's reconnect them.
1503 		 */
1504 		if (!PagePrivate(page)) {
1505 			DBG_BUGON(!justfound);
1506 			/* compressed_bvecs[] already takes a ref */
1507 			attach_page_private(page, pcl);
1508 			put_page(page);
1509 		}
1510 
1511 		/* no need to submit if it is already up-to-date */
1512 		if (PageUptodate(page)) {
1513 			unlock_page(page);
1514 			bvec->bv_page = NULL;
1515 		}
1516 		return;
1517 	}
1518 
1519 	/*
1520 	 * It has been truncated, so it's unsafe to reuse this one. Let's
1521 	 * allocate a new page for compressed data.
1522 	 */
1523 	DBG_BUGON(page->mapping);
1524 	DBG_BUGON(!justfound);
1525 
1526 	tocache = true;
1527 	unlock_page(page);
1528 	put_page(page);
1529 out_allocpage:
1530 	page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
1531 	spin_lock(&pcl->obj.lock);
1532 	if (pcl->compressed_bvecs[nr].page) {
1533 		erofs_pagepool_add(&f->pagepool, page);
1534 		spin_unlock(&pcl->obj.lock);
1535 		cond_resched();
1536 		goto repeat;
1537 	}
1538 	pcl->compressed_bvecs[nr].page = page;
1539 	spin_unlock(&pcl->obj.lock);
1540 	bvec->bv_page = page;
1541 out_tocache:
1542 	if (!tocache || bs != PAGE_SIZE ||
1543 	    add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) {
1544 		/* turn into a temporary shortlived page (1 ref) */
1545 		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
1546 		return;
1547 	}
1548 	attach_page_private(page, pcl);
1549 	/* drop a refcount added by allocpage (then 2 refs in total here) */
1550 	put_page(page);
1551 }
1552 
1553 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
1554 			      struct z_erofs_decompressqueue *fgq, bool *fg)
1555 {
1556 	struct z_erofs_decompressqueue *q;
1557 
1558 	if (fg && !*fg) {
1559 		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1560 		if (!q) {
1561 			*fg = true;
1562 			goto fg_out;
1563 		}
1564 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1565 		kthread_init_work(&q->u.kthread_work,
1566 				  z_erofs_decompressqueue_kthread_work);
1567 #else
1568 		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1569 #endif
1570 	} else {
1571 fg_out:
1572 		q = fgq;
1573 		init_completion(&fgq->u.done);
1574 		atomic_set(&fgq->pending_bios, 0);
1575 		q->eio = false;
1576 		q->sync = true;
1577 	}
1578 	q->sb = sb;
1579 	q->head = Z_EROFS_PCLUSTER_TAIL;
1580 	return q;
1581 }
1582 
1583 /* define decompression jobqueue types */
1584 enum {
1585 	JQ_BYPASS,
1586 	JQ_SUBMIT,
1587 	NR_JOBQUEUES,
1588 };
1589 
1590 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1591 				    z_erofs_next_pcluster_t qtail[],
1592 				    z_erofs_next_pcluster_t owned_head)
1593 {
1594 	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1595 	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1596 
1597 	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
1598 
1599 	WRITE_ONCE(*submit_qtail, owned_head);
1600 	WRITE_ONCE(*bypass_qtail, &pcl->next);
1601 
1602 	qtail[JQ_BYPASS] = &pcl->next;
1603 }
1604 
1605 static void z_erofs_submissionqueue_endio(struct bio *bio)
1606 {
1607 	struct z_erofs_decompressqueue *q = bio->bi_private;
1608 	blk_status_t err = bio->bi_status;
1609 	struct bio_vec *bvec;
1610 	struct bvec_iter_all iter_all;
1611 
1612 	bio_for_each_segment_all(bvec, bio, iter_all) {
1613 		struct page *page = bvec->bv_page;
1614 
1615 		DBG_BUGON(PageUptodate(page));
1616 		DBG_BUGON(z_erofs_page_is_invalidated(page));
1617 		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
1618 			if (!err)
1619 				SetPageUptodate(page);
1620 			unlock_page(page);
1621 		}
1622 	}
1623 	if (err)
1624 		q->eio = true;
1625 	z_erofs_decompress_kickoff(q, -1);
1626 	bio_put(bio);
1627 }
1628 
1629 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1630 				 struct z_erofs_decompressqueue *fgq,
1631 				 bool *force_fg, bool readahead)
1632 {
1633 	struct super_block *sb = f->inode->i_sb;
1634 	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1635 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1636 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1637 	z_erofs_next_pcluster_t owned_head = f->owned_head;
1638 	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
1639 	erofs_off_t last_pa;
1640 	struct block_device *last_bdev;
1641 	unsigned int nr_bios = 0;
1642 	struct bio *bio = NULL;
1643 	unsigned long pflags;
1644 	int memstall = 0;
1645 
1646 	/* No need to read from device for pclusters in the bypass queue. */
1647 	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1648 	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
1649 
1650 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1651 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1652 
1653 	/* by default, all need io submission */
1654 	q[JQ_SUBMIT]->head = owned_head;
1655 
1656 	do {
1657 		struct erofs_map_dev mdev;
1658 		struct z_erofs_pcluster *pcl;
1659 		erofs_off_t cur, end;
1660 		struct bio_vec bvec;
1661 		unsigned int i = 0;
1662 		bool bypass = true;
1663 
1664 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1665 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1666 		owned_head = READ_ONCE(pcl->next);
1667 
1668 		if (z_erofs_is_inline_pcluster(pcl)) {
1669 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
1670 			continue;
1671 		}
1672 
1673 		/* no device id here, thus it will always succeed */
1674 		mdev = (struct erofs_map_dev) {
1675 			.m_pa = erofs_pos(sb, pcl->obj.index),
1676 		};
1677 		(void)erofs_map_dev(sb, &mdev);
1678 
1679 		cur = mdev.m_pa;
1680 		end = cur + pcl->pclustersize;
1681 		do {
1682 			z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
1683 			if (!bvec.bv_page)
1684 				continue;
1685 
1686 			if (bio && (cur != last_pa ||
1687 				    last_bdev != mdev.m_bdev)) {
1688 submit_bio_retry:
1689 				submit_bio(bio);
1690 				if (memstall) {
1691 					psi_memstall_leave(&pflags);
1692 					memstall = 0;
1693 				}
1694 				bio = NULL;
1695 			}
1696 
1697 			if (unlikely(PageWorkingset(bvec.bv_page)) &&
1698 			    !memstall) {
1699 				psi_memstall_enter(&pflags);
1700 				memstall = 1;
1701 			}
1702 
1703 			if (!bio) {
1704 				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1705 						REQ_OP_READ, GFP_NOIO);
1706 				bio->bi_end_io = z_erofs_submissionqueue_endio;
1707 				bio->bi_iter.bi_sector = cur >> 9;
1708 				bio->bi_private = q[JQ_SUBMIT];
1709 				if (readahead)
1710 					bio->bi_opf |= REQ_RAHEAD;
1711 				++nr_bios;
1712 				last_bdev = mdev.m_bdev;
1713 			}
1714 
1715 			if (cur + bvec.bv_len > end)
1716 				bvec.bv_len = end - cur;
1717 			DBG_BUGON(bvec.bv_len < sb->s_blocksize);
1718 			if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
1719 					  bvec.bv_offset))
1720 				goto submit_bio_retry;
1721 
1722 			last_pa = cur + bvec.bv_len;
1723 			bypass = false;
1724 		} while ((cur += bvec.bv_len) < end);
1725 
1726 		if (!bypass)
1727 			qtail[JQ_SUBMIT] = &pcl->next;
1728 		else
1729 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
1730 	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1731 
1732 	if (bio) {
1733 		submit_bio(bio);
1734 		if (memstall)
1735 			psi_memstall_leave(&pflags);
1736 	}
1737 
1738 	/*
1739 	 * although background decompression is preferred, no one is pending
1740 	 * for submission; don't kick off decompression, just drop it directly.
1741 	 */
1742 	if (!*force_fg && !nr_bios) {
1743 		kvfree(q[JQ_SUBMIT]);
1744 		return;
1745 	}
1746 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
1747 }
1748 
1749 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1750 			     bool force_fg, bool ra)
1751 {
1752 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1753 
1754 	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1755 		return;
1756 	z_erofs_submit_queue(f, io, &force_fg, ra);
1757 
1758 	/* handle bypass queue (no i/o pclusters) immediately */
1759 	z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
1760 
1761 	if (!force_fg)
1762 		return;
1763 
1764 	/* wait until all bios are completed */
1765 	wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1766 
1767 	/* handle synchronous decompress queue in the caller context */
1768 	z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
1769 }
1770 
1771 /*
1772  * Since partial uptodate is still unimplemented for now, we have to use
1773  * approximate readmore strategies as a start.
1774  */
1775 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1776 		struct readahead_control *rac, bool backmost)
1777 {
1778 	struct inode *inode = f->inode;
1779 	struct erofs_map_blocks *map = &f->map;
1780 	erofs_off_t cur, end, headoffset = f->headoffset;
1781 	int err;
1782 
1783 	if (backmost) {
1784 		if (rac)
1785 			end = headoffset + readahead_length(rac) - 1;
1786 		else
1787 			end = headoffset + PAGE_SIZE - 1;
1788 		map->m_la = end;
1789 		err = z_erofs_map_blocks_iter(inode, map,
1790 					      EROFS_GET_BLOCKS_READMORE);
1791 		if (err)
1792 			return;
1793 
1794 		/* expand ra for the trailing edge if readahead */
1795 		if (rac) {
1796 			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1797 			readahead_expand(rac, headoffset, cur - headoffset);
1798 			return;
1799 		}
1800 		end = round_up(end, PAGE_SIZE);
1801 	} else {
1802 		end = round_up(map->m_la, PAGE_SIZE);
1803 
1804 		if (!map->m_llen)
1805 			return;
1806 	}
1807 
1808 	cur = map->m_la + map->m_llen - 1;
1809 	while ((cur >= end) && (cur < i_size_read(inode))) {
1810 		pgoff_t index = cur >> PAGE_SHIFT;
1811 		struct page *page;
1812 
1813 		page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
1814 		if (page) {
1815 			if (PageUptodate(page)) {
1816 				unlock_page(page);
1817 			} else {
1818 				err = z_erofs_do_read_page(f, page);
1819 				if (err)
1820 					erofs_err(inode->i_sb,
1821 						  "readmore error at page %lu @ nid %llu",
1822 						  index, EROFS_I(inode)->nid);
1823 			}
1824 			put_page(page);
1825 		}
1826 
1827 		if (cur < PAGE_SIZE)
1828 			break;
1829 		cur = (index << PAGE_SHIFT) - 1;
1830 	}
1831 }
1832 
1833 static int z_erofs_read_folio(struct file *file, struct folio *folio)
1834 {
1835 	struct page *page = &folio->page;
1836 	struct inode *const inode = page->mapping->host;
1837 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1838 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1839 	int err;
1840 
1841 	trace_erofs_readpage(page, false);
1842 	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1843 
1844 	z_erofs_pcluster_readmore(&f, NULL, true);
1845 	err = z_erofs_do_read_page(&f, page);
1846 	z_erofs_pcluster_readmore(&f, NULL, false);
1847 	z_erofs_pcluster_end(&f);
1848 
1849 	/* if some compressed cluster ready, need submit them anyway */
1850 	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
1851 
1852 	if (err)
1853 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
1854 
1855 	erofs_put_metabuf(&f.map.buf);
1856 	erofs_release_pages(&f.pagepool);
1857 	return err;
1858 }
1859 
1860 static void z_erofs_readahead(struct readahead_control *rac)
1861 {
1862 	struct inode *const inode = rac->mapping->host;
1863 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1864 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1865 	struct page *head = NULL, *page;
1866 	unsigned int nr_pages;
1867 
1868 	f.headoffset = readahead_pos(rac);
1869 
1870 	z_erofs_pcluster_readmore(&f, rac, true);
1871 	nr_pages = readahead_count(rac);
1872 	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
1873 
1874 	while ((page = readahead_page(rac))) {
1875 		set_page_private(page, (unsigned long)head);
1876 		head = page;
1877 	}
1878 
1879 	while (head) {
1880 		struct page *page = head;
1881 		int err;
1882 
1883 		/* traversal in reverse order */
1884 		head = (void *)page_private(page);
1885 
1886 		err = z_erofs_do_read_page(&f, page);
1887 		if (err)
1888 			erofs_err(inode->i_sb,
1889 				  "readahead error at page %lu @ nid %llu",
1890 				  page->index, EROFS_I(inode)->nid);
1891 		put_page(page);
1892 	}
1893 	z_erofs_pcluster_readmore(&f, rac, false);
1894 	z_erofs_pcluster_end(&f);
1895 
1896 	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
1897 	erofs_put_metabuf(&f.map.buf);
1898 	erofs_release_pages(&f.pagepool);
1899 }
1900 
1901 const struct address_space_operations z_erofs_aops = {
1902 	.read_folio = z_erofs_read_folio,
1903 	.readahead = z_erofs_readahead,
1904 };
1905