/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZPVEC_H
#define __EROFS_FS_ZPVEC_H

#include "tagptr.h"

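/*
 * A pagevec temporarily collects tagged page pointers (a page pointer
 * plus a 2-bit page type, see erofs_vtptr_t below) for the decompress
 * subsystem.  The first entries live in a small inline array supplied
 * by the caller; once it fills up, an enqueued EXCLUSIVE page is
 * kmapped and reused as storage for the following entries, so no extra
 * memory is allocated while collecting pages.
 */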
/* page type in pagevec for decompress subsystem */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};

extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
	__bad_page_type_exclusive(void);

/* pagevec tagged pointer */
typedef tagptr2_t	erofs_vtptr_t;

/* pagevec collector */
struct z_erofs_pagevec_ctor {
	struct page *curr, *next;
	erofs_vtptr_t *pages;

	unsigned int nr, index;
};

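/* drop the kmap of the external pagevec page in use, if any */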
static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
					     bool atomic)
{
	if (!ctor->curr)
		return;

	if (atomic)
		kunmap_atomic(ctor->pages);
	else
		kunmap(ctor->curr);
}

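/*
 * find the page backing the next pagevec chunk: either the one already
 * recorded in ctor->next or the first EXCLUSIVE page still held in the
 * current vector
 */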
static inline struct page *
z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
			       unsigned int nr)
{
	unsigned int index;

	/* keep away from occupied pages */
	if (ctor->next)
		return ctor->next;

	for (index = 0; index < nr; ++index) {
		const erofs_vtptr_t t = ctor->pages[index];
		const unsigned int tags = tagptr_unfold_tags(t);

		if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
			return tagptr_unfold_ptr(t);
	}
	DBG_BUGON(nr >= ctor->nr);
	return NULL;
}

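/*
 * advance to the next pagevec storage page: unmap the current one,
 * kmap the next and reset nr/index to describe the fresh page
 */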
static inline void
z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
			      bool atomic)
{
	struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);

	z_erofs_pagevec_ctor_exit(ctor, atomic);

	ctor->curr = next;
	ctor->next = NULL;
	ctor->pages = atomic ?
		kmap_atomic(ctor->curr) : kmap(ctor->curr);

	ctor->nr = PAGE_SIZE / sizeof(struct page *);
	ctor->index = 0;
}

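/*
 * attach the collector to the inline vector @pages (@nr entries) and
 * fast-forward it to absolute slot @i, paging down over any external
 * pages that are already filled
 */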
static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
					     unsigned int nr,
					     erofs_vtptr_t *pages,
					     unsigned int i)
{
	ctor->nr = nr;
	ctor->curr = ctor->next = NULL;
	ctor->pages = pages;

	if (i >= nr) {
		i -= nr;
		z_erofs_pagevec_ctor_pagedown(ctor, false);
		while (i > ctor->nr) {
			i -= ctor->nr;
			z_erofs_pagevec_ctor_pagedown(ctor, false);
		}
	}
	ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
	ctor->index = i;
}

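/*
 * queue @page with @type; returns false if the final slot of the
 * current chunk must stay reserved for a page that can safely back the
 * next chunk
 */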
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
					   struct page *page,
					   enum z_erofs_page_type type,
					   bool pvec_safereuse)
{
	if (!ctor->next) {
		/* some pages cannot be reused as pvec safely without I/O */
		if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
			type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;

		if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
		    ctor->index + 1 == ctor->nr)
			return false;
	}

	if (ctor->index >= ctor->nr)
		z_erofs_pagevec_ctor_pagedown(ctor, false);

	/* exclusive page type must be 0 */
	if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
		__bad_page_type_exclusive();

	/*
	 * note that ctor->next can never be 1 or 2 (page pointers are
	 * aligned), so this only matches a NULL ctor->next against an
	 * EXCLUSIVE page, which then backs the next pagevec chunk
	 */
	if (type == (uintptr_t)ctor->next)
		ctor->next = page;

	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
	return true;
}

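/*
 * pop the next tagged page, returning its type via @type and paging
 * down (atomically) when a chunk boundary is crossed
 */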
static inline struct page *
z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
			enum z_erofs_page_type *type)
{
	erofs_vtptr_t t;

	if (ctor->index >= ctor->nr) {
		DBG_BUGON(!ctor->next);
		z_erofs_pagevec_ctor_pagedown(ctor, true);
	}

	t = ctor->pages[ctor->index];

	*type = tagptr_unfold_tags(t);

	/* again, ctor->next can never be 1 or 2 since pointers are aligned */
	if (*type == (uintptr_t)ctor->next)
		ctor->next = tagptr_unfold_ptr(t);

	ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
	return tagptr_unfold_ptr(t);
}
#endif
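
A minimal usage sketch (illustration only, not part of this header): the
collector is driven as init -> enqueue ... -> exit while gathering pages and
init -> dequeue ... -> exit while draining them again.  The helper below and
its inline-vector size EXAMPLE_NR_INLINE are hypothetical; a real caller keeps
the inline vector in a longer-lived structure so that a later dequeue pass can
read the entries back.

#define EXAMPLE_NR_INLINE	3	/* hypothetical inline-vector size */

static void example_collect(struct page **in, unsigned int nr_in)
{
	/* a real caller would keep these alive until the dequeue pass */
	erofs_vtptr_t inline_vecs[EXAMPLE_NR_INLINE];
	struct z_erofs_pagevec_ctor ctor;
	unsigned int i;

	z_erofs_pagevec_ctor_init(&ctor, EXAMPLE_NR_INLINE, inline_vecs, 0);

	for (i = 0; i < nr_in; ++i) {
		/* EXCLUSIVE pages may later back the pagevec itself */
		if (!z_erofs_pagevec_enqueue(&ctor, in[i],
					     Z_EROFS_PAGE_TYPE_EXCLUSIVE,
					     true))
			break;	/* only possible for non-exclusive/unsafe pages */
	}

	z_erofs_pagevec_ctor_exit(&ctor, false);
}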