// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 * Copyright (C) 2022, Bytedance Inc. All rights reserved.
 */
#include <linux/fscache.h>
#include "internal.h"

static DEFINE_MUTEX(erofs_domain_list_lock);
static DEFINE_MUTEX(erofs_domain_cookies_lock);
static LIST_HEAD(erofs_domain_list);
static struct vfsmount *erofs_pseudo_mnt;

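/*
 * Allocate a bare netfs_io_request for one read: only the fields needed to
 * track the I/O range and subrequests are initialized, since erofs submits
 * the I/O itself instead of going through the netfs library.
 */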
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->mapping	= mapping;
	rreq->inode	= mapping->host;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}

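/* Drop a reference on @rreq; the last put ends the cache operation and frees it. */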
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}

static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}

static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}

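/*
 * Walk all folios covered by @rreq and check them against the list of
 * subrequests: a folio is marked uptodate only if every subrequest
 * overlapping it succeeded. All folios are unlocked either way.
 */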
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos, pgend;
		bool pg_failed = false;

		if (xas_retry(&xas, folio))
			continue;

		pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
		pgend = pgpos + folio_size(folio);

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}

static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}

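/*
 * Completion callback passed to fscache_read(): record the error (if any),
 * complete the parent request once all outstanding subrequests have
 * finished, and drop this subrequest's reference.
 */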
static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache and fill it into the page cache described by
 * @rreq; both the range start and length must be PAGE_SIZE-aligned.
 * @pstart is the start physical address in the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (!subreq) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&subreq->rreq_link);
		refcount_set(&subreq->ref, 2);
		subreq->rreq = rreq;
		refcount_inc(&rreq->ref);

		subreq->start = pstart + done;
		subreq->len = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}

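/* Read a metadata folio from the primary device blob via fscache. */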
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
	int ret;
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}

/*
 * Read into page cache in the range described by (@pos, @len).
 *
 * On return, if the output @unlock is true, the caller is responsible for
 * unlocking the pages; otherwise the netfs_io_request machinery takes over
 * that responsibility and unlocks them on completion.
 *
 * The return value is the number of bytes successfully handled, or a
 * negative error code on failure. As the only exception, once the
 * netfs_io_request has been allocated, the length of the range is returned
 * instead of an error code even on failure, so that .readahead() can
 * advance @rac accordingly.
 */
static int erofs_fscache_data_read(struct address_space *mapping,
				   loff_t pos, size_t len, bool *unlock)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct iov_iter iter;
	size_t count;
	int ret;

	*unlock = true;

	map.m_la = pos;
	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		return ret;

	if (map.m_flags & EROFS_MAP_META) {
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
		erofs_blk_t blknr;
		size_t offset, size;
		void *src;

		/* For tail packing layout, the offset may be non-zero. */
		offset = erofs_blkoff(sb, map.m_pa);
		blknr = erofs_blknr(sb, map.m_pa);
		size = map.m_llen;

		src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
		if (IS_ERR(src))
			return PTR_ERR(src);

		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
		if (copy_to_iter(src + offset, size, &iter) != size) {
			erofs_put_metabuf(&buf);
			return -EFAULT;
		}
		iov_iter_zero(PAGE_SIZE - size, &iter);
		erofs_put_metabuf(&buf);
		return PAGE_SIZE;
	}

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		count = len;
		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
		iov_iter_zero(count, &iter);
		return count;
	}

	count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
	DBG_BUGON(!count || count % PAGE_SIZE);

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	rreq = erofs_fscache_alloc_request(mapping, pos, count);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	*unlock = false;
	erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
			rreq, mdev.m_pa + (pos - map.m_la));
	return count;
}

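/*
 * .read_folio() for data inodes: read one folio, unlocking it here on the
 * synchronous paths (inline data, unmapped holes, and errors).
 */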
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
	bool unlock;
	int ret;

	ret = erofs_fscache_data_read(folio_mapping(folio), folio_pos(folio),
				      folio_size(folio), &unlock);
	if (unlock) {
		if (ret > 0)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	return ret < 0 ? ret : 0;
}

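/*
 * .readahead() for data inodes: split the readahead window into extents
 * via erofs_fscache_data_read() and consume the folios of each chunk.
 */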
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct folio *folio;
	size_t len, done = 0;
	loff_t start, pos;
	bool unlock;
	int ret, size;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		pos = start + done;
		ret = erofs_fscache_data_read(rac->mapping, pos,
					      len - done, &unlock);
		if (ret <= 0)
			return;

		size = ret;
		while (size) {
			folio = readahead_folio(rac);
			size -= folio_size(folio);
			if (unlock) {
				folio_mark_uptodate(folio);
				folio_unlock(folio);
			}
		}
	} while ((done += ret) < len);
}

static const struct address_space_operations erofs_fscache_meta_aops = {
	.read_folio = erofs_fscache_meta_read_folio,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.read_folio = erofs_fscache_read_folio,
	.readahead = erofs_fscache_readahead,
};

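/*
 * Drop a reference on @domain; the last put also unmounts the pseudo mount
 * once no domain is left, and relinquishes the backing fscache volume.
 */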
static void erofs_fscache_domain_put(struct erofs_domain *domain)
{
	if (!domain)
		return;
	mutex_lock(&erofs_domain_list_lock);
	if (refcount_dec_and_test(&domain->ref)) {
		list_del(&domain->list);
		if (list_empty(&erofs_domain_list)) {
			kern_unmount(erofs_pseudo_mnt);
			erofs_pseudo_mnt = NULL;
		}
		fscache_relinquish_volume(domain->volume, NULL, false);
		mutex_unlock(&erofs_domain_list_lock);
		kfree(domain->domain_id);
		kfree(domain);
		return;
	}
	mutex_unlock(&erofs_domain_list_lock);
}

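/*
 * Acquire the fscache volume for this filesystem, named "erofs,<domain_id>"
 * in shared domain mode or "erofs,<fsid>" otherwise.
 */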
static int erofs_fscache_register_volume(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	char *domain_id = sbi->domain_id;
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s",
			 domain_id ? domain_id : sbi->fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}

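/*
 * Set up a new shared domain: register its volume, mount the global pseudo
 * filesystem (which hosts the anonymous inodes backing domain cookies) on
 * first use, and add the domain to the global list.
 */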
static int erofs_fscache_init_domain(struct super_block *sb)
{
	int err;
	struct erofs_domain *domain;
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
	if (!domain)
		return -ENOMEM;

	domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
	if (!domain->domain_id) {
		kfree(domain);
		return -ENOMEM;
	}

	err = erofs_fscache_register_volume(sb);
	if (err)
		goto out;

	if (!erofs_pseudo_mnt) {
		erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
		if (IS_ERR(erofs_pseudo_mnt)) {
			err = PTR_ERR(erofs_pseudo_mnt);
			goto out;
		}
	}

	domain->volume = sbi->volume;
	refcount_set(&domain->ref, 1);
	list_add(&domain->list, &erofs_domain_list);
	sbi->domain = domain;
	return 0;
out:
	kfree(domain->domain_id);
	kfree(domain);
	return err;
}

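/*
 * Look up an existing domain by @domain_id under erofs_domain_list_lock and
 * share it, or initialize a fresh one if none matches.
 */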
static int erofs_fscache_register_domain(struct super_block *sb)
{
	int err;
	struct erofs_domain *domain;
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_lock(&erofs_domain_list_lock);
	list_for_each_entry(domain, &erofs_domain_list, list) {
		if (!strcmp(domain->domain_id, sbi->domain_id)) {
			sbi->domain = domain;
			sbi->volume = domain->volume;
			refcount_inc(&domain->ref);
			mutex_unlock(&erofs_domain_list_lock);
			return 0;
		}
	}
	err = erofs_fscache_init_domain(sb);
	mutex_unlock(&erofs_domain_list_lock);
	return err;
}

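/*
 * Acquire a cookie for the blob @name within the sb's volume; when
 * EROFS_REG_COOKIE_NEED_INODE is set, also allocate a backing inode whose
 * mapping uses the metadata address_space operations.
 */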
static
struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
						   char *name,
						   unsigned int flags)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (flags & EROFS_REG_COOKIE_NEED_INODE) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
		inode->i_blkbits = EROFS_SB(sb)->blkszbits;

		ctx->inode = inode;
	}

	return ctx;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
err:
	kfree(ctx);
	return ERR_PTR(ret);
}

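/*
 * Undo erofs_fscache_acquire_cookie(): unuse and relinquish the cookie,
 * drop the backing inode, and free the context.
 */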
static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
{
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	iput(ctx->inode);
	kfree(ctx->name);
	kfree(ctx);
}

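/*
 * Acquire a cookie in shared domain mode: the context is additionally bound
 * to an anonymous inode on the pseudo mount, whose i_count serves as the
 * per-domain reference count for the cookie.
 */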
static
struct erofs_fscache *erofs_fscache_domain_init_cookie(struct super_block *sb,
						       char *name,
						       unsigned int flags)
{
	int err;
	struct inode *inode;
	struct erofs_fscache *ctx;
	struct erofs_domain *domain = EROFS_SB(sb)->domain;

	ctx = erofs_fscache_acquire_cookie(sb, name, flags);
	if (IS_ERR(ctx))
		return ctx;

	ctx->name = kstrdup(name, GFP_KERNEL);
	if (!ctx->name) {
		err = -ENOMEM;
		goto out;
	}

	inode = new_inode(erofs_pseudo_mnt->mnt_sb);
	if (!inode) {
		err = -ENOMEM;
		goto out;
	}

	ctx->domain = domain;
	ctx->anon_inode = inode;
	inode->i_private = ctx;
	refcount_inc(&domain->ref);
	return ctx;
out:
	erofs_fscache_relinquish_cookie(ctx);
	return ERR_PTR(err);
}

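/*
 * In shared domain mode, cookies are shared between filesystems: search the
 * pseudo sb's inode list for an existing cookie of the same name in this
 * domain before creating a new one.
 */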
static
struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
						   char *name,
						   unsigned int flags)
{
	struct inode *inode;
	struct erofs_fscache *ctx;
	struct erofs_domain *domain = EROFS_SB(sb)->domain;
	struct super_block *psb = erofs_pseudo_mnt->mnt_sb;

	mutex_lock(&erofs_domain_cookies_lock);
	spin_lock(&psb->s_inode_list_lock);
	list_for_each_entry(inode, &psb->s_inodes, i_sb_list) {
		ctx = inode->i_private;
		if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
			continue;
		if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
			igrab(inode);
		} else {
			erofs_err(sb, "%s already exists in domain %s", name,
				  domain->domain_id);
			ctx = ERR_PTR(-EEXIST);
		}
		spin_unlock(&psb->s_inode_list_lock);
		mutex_unlock(&erofs_domain_cookies_lock);
		return ctx;
	}
	spin_unlock(&psb->s_inode_list_lock);
	ctx = erofs_fscache_domain_init_cookie(sb, name, flags);
	mutex_unlock(&erofs_domain_cookies_lock);
	return ctx;
}

struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name,
						    unsigned int flags)
{
	if (EROFS_SB(sb)->domain_id)
		return erofs_domain_register_cookie(sb, name, flags);
	return erofs_fscache_acquire_cookie(sb, name, flags);
}

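/*
 * Drop a cookie reference. In shared domain mode the cookie is only torn
 * down once the anonymous inode's refcount shows no other user is left.
 */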
void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
{
	bool drop;
	struct erofs_domain *domain;

	if (!ctx)
		return;
	domain = ctx->domain;
	if (domain) {
		mutex_lock(&erofs_domain_cookies_lock);
		drop = atomic_read(&ctx->anon_inode->i_count) == 1;
		iput(ctx->anon_inode);
		mutex_unlock(&erofs_domain_cookies_lock);
		if (!drop)
			return;
	}

	erofs_fscache_relinquish_cookie(ctx);
	erofs_fscache_domain_put(domain);
}

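/*
 * Register the fscache infrastructure for this mount: the domain or volume,
 * plus the cookie of the primary data blob (the fsid).
 */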
int erofs_fscache_register_fs(struct super_block *sb)
{
	int ret;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	unsigned int flags;

	if (sbi->domain_id)
		ret = erofs_fscache_register_domain(sb);
	else
		ret = erofs_fscache_register_volume(sb);
	if (ret)
		return ret;

	/*
	 * When the shared domain is enabled, use NEED_NOEXIST to guarantee
	 * that the primary data blob (aka fsid) is unique in the shared
	 * domain.
	 *
	 * For the non-shared-domain case, fscache_acquire_volume() invoked
	 * by erofs_fscache_register_volume() has already guaranteed the
	 * uniqueness of the primary data blob.
	 *
	 * Acquired domain/volume will be relinquished in kill_sb() on error.
	 */
	flags = EROFS_REG_COOKIE_NEED_INODE;
	if (sbi->domain_id)
		flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
	fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
	if (IS_ERR(fscache))
		return PTR_ERR(fscache);

	sbi->s_fscache = fscache;
	return 0;
}

void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	erofs_fscache_unregister_cookie(sbi->s_fscache);

	if (sbi->domain)
		erofs_fscache_domain_put(sbi->domain);
	else
		fscache_relinquish_volume(sbi->volume, NULL, false);

	sbi->s_fscache = NULL;
	sbi->volume = NULL;
	sbi->domain = NULL;
}
704