/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include <linux/kthread.h>
#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS	3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated atomically by parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};
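
/*
 * Illustrative locking sketch (a hypothetical caller, not a helper defined
 * here): per the exclusion rules above, "L" fields such as nr_pages, vcnt
 * and pagevec[] may only be touched with the collection mutex held, e.g.
 *
 *	mutex_lock(&cl->lock);
 *	if (cl->nr_pages < pgnr + 1)
 *		cl->nr_pages = pgnr + 1;
 *	mutex_unlock(&cl->lock);
 */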

#define Z_EROFS_PCLUSTER_FULL_LENGTH	0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT	1
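
/*
 * Illustrative encoding sketch derived from the two macros above (the
 * variable names are hypothetical): bit 0 flags whether the recorded
 * value is the full decompressed length, and the remaining bits store
 * the length itself:
 *
 *	pcl->length = (llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
 *		      Z_EROFS_PCLUSTER_FULL_LENGTH;
 *	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
 */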

/*
 * Leave a type here in case another tagged pointer is introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: points to the next chained pcluster or a TAIL marker */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of the decompressed length, plus a full-length flag */
	unsigned int length;

	/* I: physical cluster size in pages */
	unsigned short pclusterpages;

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or inplaced pages) */
	struct page *compressed_pages[];
};
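
/*
 * Illustrative allocation sketch (hypothetical, simply following the
 * flexible-array layout above): a pcluster carries its compressed page
 * array inline, so it is sized roughly as
 *
 *	pcl = kzalloc(struct_size(pcl, compressed_pages, nrpages),
 *		      GFP_NOFS);
 */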

#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* avoid valid 32-bit kernel addresses for these tagged values */

/* the chained workgroup hasn't submitted I/O (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)
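
/*
 * Illustrative chain-walking sketch (a hypothetical caller): a pcluster
 * chain ends at one of the three sentinels above, and the "A"-marked
 * next field is read with the usual atomic-access annotation:
 *
 *	while (owned != Z_EROFS_PCLUSTER_TAIL &&
 *	       owned != Z_EROFS_PCLUSTER_TAIL_CLOSED &&
 *	       owned != Z_EROFS_PCLUSTER_NIL) {
 *		struct z_erofs_pcluster *pcl = owned;
 *
 *		owned = READ_ONCE(pcl->next);
 *	}
 */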

struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
		struct kthread_work kthread_work;
	} u;
};
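
/*
 * Illustrative kickoff sketch (an assumption about how the union is
 * meant to be used, not a helper defined here): once the last pending
 * bio of a queue completes, a synchronous reader is woken via u.done
 * while the asynchronous path hands decompression off to u.work:
 *
 *	if (atomic_dec_and_test(&q->pending_bios)) {
 *		if (sync)
 *			complete(&q->u.done);
 *		else
 *			queue_work(z_erofs_workqueue, &q->u.work);
 *	}
 */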

#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
					 struct page *page)
{
	return page->mapping == MNGD_MAPPING(sbi);
}
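
/*
 * Illustrative usage sketch (a hypothetical cleanup loop): pages owned
 * by the managed cache must be left to the cache rather than released
 * by the decompression path directly:
 *
 *	if (erofs_page_is_managed(sbi, page))
 *		continue;
 *	put_page(page);
 */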

#define Z_EROFS_ONLINEPAGE_COUNT_BITS	2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK	((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT	(Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (a.k.a. ongoing_packs): the number of outstanding references
 * still holding the page locked;
 * sub-index: 0 for a partial page, >= 1 for each full-page sub-index
 */
typedef atomic_t z_erofs_onlinepage_t;
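
/*
 * Bit-layout sketch derived from the macros above (the value names are
 * illustrative): the low Z_EROFS_ONLINEPAGE_COUNT_BITS bits count the
 * waiters and the remaining bits hold the sub-index:
 *
 *	waiters  = v & Z_EROFS_ONLINEPAGE_COUNT_MASK;
 *	subindex = v >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
 */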

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
					    uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
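
/*
 * Illustrative lifecycle sketch tying the three helpers above together
 * (the loop shape is hypothetical): a page under parallel decompression
 * starts with one implicit reference from init and gains one per
 * attached sub-request; each z_erofs_onlinepage_endio() then drops one
 * reference, and whichever call brings the count to zero unlocks the
 * page:
 *
 *	z_erofs_onlinepage_init(page);
 *	for each sub-request i:
 *		z_erofs_onlinepage_fixup(page, i, true);
 *	...
 *	z_erofs_onlinepage_endio(page);
 */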

#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048

#endif