1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2018 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
6 */
7 #ifndef __EROFS_FS_ZDATA_H
8 #define __EROFS_FS_ZDATA_H
9
10 #include "internal.h"
11 #include "zpvec.h"
12
13 #define Z_EROFS_NR_INLINE_PAGEVECS 3
14
15 /*
16 * Structure fields follow one of the following exclusion rules.
17 *
18 * I: Modifiable by initialization/destruction paths and read-only
19 * for everyone else;
20 *
21 * L: Field should be protected by pageset lock;
22 *
23 * A: Field should be accessed / updated in atomic for parallelized code.
24 */
/*
 * Per-pcluster decompression bookkeeping; embedded in
 * struct z_erofs_pcluster as primary_collection (see
 * z_erofs_primarycollection() below).
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};
45
46 #define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
47 #define Z_EROFS_PCLUSTER_LENGTH_BIT 1
48
/*
 * Keep a dedicated type here in case another tagged
 * pointer is introduced later.
 */
53 typedef void *z_erofs_next_pcluster_t;
54
/*
 * In-memory representation of a physical (compressed) cluster.
 * NOTE(review): obj is placed first — presumably so a pcluster can be
 * managed through the generic erofs_workgroup interfaces; confirm
 * against the callers before relying on the cast.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: compressed pages (including multi-usage pages) */
	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];

	/*
	 * A: lower limit of decompressed length and if full length or not
	 * (low bit appears to carry Z_EROFS_PCLUSTER_FULL_LENGTH with the
	 * length shifted by Z_EROFS_PCLUSTER_LENGTH_BIT — see the macros
	 * above)
	 */
	unsigned int length;

	/* I: compression algorithm format */
	unsigned char algorithmformat;
	/* I: bit shift of physical cluster size */
	unsigned char clusterbits;
};
73
74 #define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
75
76 /* let's avoid the valid 32-bit kernel addresses */
77
/* the chained workgroup hasn't submitted io (still open) */
79 #define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
80 /* the chained workgroup has already submitted io */
81 #define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
82
83 #define Z_EROFS_PCLUSTER_NIL (NULL)
84
85 #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
86
/*
 * Decompression I/O descriptor for a chain of pclusters.
 */
struct z_erofs_unzip_io {
	/* number of bios still in flight for this chain */
	atomic_t pending_bios;
	/* head of the chained pcluster list covered by this I/O */
	z_erofs_next_pcluster_t head;

	union {
		/* presumably used when the submitter waits in place — verify */
		wait_queue_head_t wait;
		/* presumably used when completion runs from a workqueue */
		struct work_struct work;
	} u;
};
96
/* an unzip I/O descriptor with its owning superblock attached */
struct z_erofs_unzip_io_sb {
	struct z_erofs_unzip_io io;
	struct super_block *sb;
};
101
102 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
erofs_page_is_managed(const struct erofs_sb_info * sbi,struct page * page)103 static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
104 struct page *page)
105 {
106 return page->mapping == MNGD_MAPPING(sbi);
107 }
108
109 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
110 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
111 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
112
/*
 * Layout of the online-page value:
 *  - low Z_EROFS_ONLINEPAGE_COUNT_BITS bits: number of waiters
 *    (aka. ongoing_packs) still needed before the page is unlocked;
 *  - remaining high bits: page sub-index (0 for a partial page,
 *    >= 1 for a full page sub-index).
 */
117 typedef atomic_t z_erofs_onlinepage_t;
118
/*
 * Type punning helper: page_private() hands back an unsigned long slot
 * which the online-page helpers below access as an atomic_t through
 * this union.
 */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};
124
z_erofs_onlinepage_index(struct page * page)125 static inline unsigned int z_erofs_onlinepage_index(struct page *page)
126 {
127 union z_erofs_onlinepage_converter u;
128
129 DBG_BUGON(!PagePrivate(page));
130 u.v = &page_private(page);
131
132 return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
133 }
134
/*
 * Initialize page->private as an online-page value: the waiter count
 * starts at 1 (ATOMIC_INIT(1)) so the page cannot be unlocked by
 * z_erofs_onlinepage_endio() before initialization is finished.
 */
static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
		/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	/* make the private value visible before PagePrivate is observed */
	smp_wmb();
	SetPagePrivate(page);
}
147
/*
 * Record the page sub-index in the high bits of page->private and, when
 * @down is true, add one waiter to the low count bits.  The update is
 * applied with a cmpxchg retry loop so concurrent callers don't lose
 * each other's changes.
 */
static inline void z_erofs_onlinepage_fixup(struct page *page,
					    uintptr_t index, bool down)
{
	unsigned long *p, o, v, id;
repeat:
	p = &page_private(page);
	o = READ_ONCE(*p);

	id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (id) {
		/*
		 * an index is already recorded; index 0 here means the
		 * caller has nothing to add, so bail out untouched
		 */
		if (!index)
			return;

		/* a different non-zero index would be a logic error */
		DBG_BUGON(id != index);
	}

	/* new value: requested index plus the (possibly bumped) count */
	v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (cmpxchg(p, o, v) != o)
		goto repeat;
}
169
/*
 * Drop one waiter reference on an online page.  The caller that brings
 * the count bits to zero clears PagePrivate, marks the page up-to-date
 * (unless an error was flagged on it) and finally unlocks it.
 */
static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	/* low count bits hit zero only once every user has finished */
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
187
188 #define Z_EROFS_VMAP_ONSTACK_PAGES \
189 min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
190 #define Z_EROFS_VMAP_GLOBAL_PAGES 2048
191
192 #endif
193
194