/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_ZPVEC_H
#define __EROFS_FS_ZPVEC_H

#include "tagptr.h"

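/*
 * A pagevec records (page pointer, page type) pairs as tagged pointers:
 * the type is folded into the low bits of the pointer and recovered with
 * tagptr_unfold_tags()/tagptr_unfold_ptr().  When the inline vector fills
 * up, a previously recorded EXCLUSIVE page is (k)mapped and reused to hold
 * the overflowing entries.
 */
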
/* page type in pagevec for decompress subsystem */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};

extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
	__bad_page_type_exclusive(void);

/* pagevec tagged pointer */
typedef tagptr2_t	erofs_vtptr_t;

/* pagevec collector */
struct z_erofs_pagevec_ctor {
	struct page *curr, *next;
	erofs_vtptr_t *pages;

	unsigned int nr, index;
};

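/* unmap the pagevec page currently mapped by the collector, if any */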
static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
					     bool atomic)
{
	if (!ctor->curr)
		return;

	if (atomic)
		kunmap_atomic(ctor->pages);
	else
		kunmap(ctor->curr);
}

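/*
 * find the next page usable as pagevec storage: either the one already
 * remembered in ctor->next, or the first EXCLUSIVE page among the first
 * @nr recorded entries.
 */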
static inline struct page *
z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
			       unsigned int nr)
{
	unsigned int index;

	/* keep away from occupied pages */
	if (ctor->next)
		return ctor->next;

	for (index = 0; index < nr; ++index) {
		const erofs_vtptr_t t = ctor->pages[index];
		const unsigned int tags = tagptr_unfold_tags(t);

		if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
			return tagptr_unfold_ptr(t);
	}
	DBG_BUGON(nr >= ctor->nr);
	return NULL;
}

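/*
 * advance the collector to the next pagevec page: unmap the current one
 * and map the page returned by z_erofs_pagevec_ctor_next_page() as the
 * new backing storage.
 */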
static inline void
z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
			      bool atomic)
{
	struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);

	z_erofs_pagevec_ctor_exit(ctor, atomic);

	ctor->curr = next;
	ctor->next = NULL;
	ctor->pages = atomic ?
		kmap_atomic(ctor->curr) : kmap(ctor->curr);

	ctor->nr = PAGE_SIZE / sizeof(struct page *);
	ctor->index = 0;
}

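/*
 * initialize the collector over an inline pagevec of @nr slots and skip
 * ahead to slot @i, paging down as many times as needed if @i lies beyond
 * the inline part.
 */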
static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
					     unsigned int nr,
					     erofs_vtptr_t *pages,
					     unsigned int i)
{
	ctor->nr = nr;
	ctor->curr = ctor->next = NULL;
	ctor->pages = pages;

	if (i >= nr) {
		i -= nr;
		z_erofs_pagevec_ctor_pagedown(ctor, false);
		while (i > ctor->nr) {
			i -= ctor->nr;
			z_erofs_pagevec_ctor_pagedown(ctor, false);
		}
	}
	ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
	ctor->index = i;
}

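/*
 * record @page together with its @type in the next pagevec slot; returns
 * false if the slot cannot be used because the last free slot has to be
 * kept for a page usable as the next pagevec storage.
 */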
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
					   struct page *page,
					   enum z_erofs_page_type type,
					   bool pvec_safereuse)
{
	if (!ctor->next) {
		/* some pages cannot be reused as pvec safely without I/O */
		if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
			type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;

		if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
		    ctor->index + 1 == ctor->nr)
			return false;
	}

	if (ctor->index >= ctor->nr)
		z_erofs_pagevec_ctor_pagedown(ctor, false);

	/* exclusive page type must be 0 */
	if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
		__bad_page_type_exclusive();

	/*
	 * ctor->next is either NULL or a page pointer, so it can never
	 * equal the small tag values; this only matches an EXCLUSIVE page
	 * enqueued while no next storage page is known yet.
	 */
	if (type == (uintptr_t)ctor->next)
		ctor->next = page;

	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
	return true;
}

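/*
 * pop the entry at the current slot, returning the page and storing its
 * type in @type; the consumed slot is cleared afterwards.
 */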
static inline struct page *
z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
			enum z_erofs_page_type *type)
{
	erofs_vtptr_t t;

	if (ctor->index >= ctor->nr) {
		DBG_BUGON(!ctor->next);
		z_erofs_pagevec_ctor_pagedown(ctor, true);
	}

	t = ctor->pages[ctor->index];

	*type = tagptr_unfold_tags(t);

	/*
	 * ctor->next is either NULL or a page pointer, so this only
	 * matches an EXCLUSIVE entry dequeued while no next storage page
	 * is known yet.
	 */
	if (*type == (uintptr_t)ctor->next)
		ctor->next = tagptr_unfold_ptr(t);

	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
	return tagptr_unfold_ptr(t);
}
#endif