/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include <linux/kthread.h>
#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS      3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pageset lock;
 *
 * A: Field should be accessed / updated atomically for parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
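
/*
 * Illustrative note (an assumption added for clarity, not taken verbatim
 * from this header): the two flags above describe how pcluster->length
 * below packs its value; under that assumption, the decompressed length
 * lives in the bits at or above Z_EROFS_PCLUSTER_LENGTH_BIT, and
 * Z_EROFS_PCLUSTER_FULL_LENGTH marks the value as the final length rather
 * than a lower bound, e.g.:
 *
 *	pcl->length = (llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
 *		      Z_EROFS_PCLUSTER_FULL_LENGTH;
 */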

/*
 * A dedicated type is kept here in case another tagged pointer
 * needs to be introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: points to the next chained pcluster or one of the TAIL markers */
	z_erofs_next_pcluster_t next;

	/* A: lower bound of the decompressed length, plus a full-length flag */
	unsigned int length;

	/* I: physical cluster size in pages */
	unsigned short pclusterpages;

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or in-place pages) */
	struct page *compressed_pages[];
};

#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* the magic tail values below avoid valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O yet (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)
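
/*
 * Hypothetical helper (a sketch added for illustration, not part of the
 * original header): assuming a chain walk stops once it reaches one of the
 * markers above, the terminating condition could be expressed as follows.
 */
static inline bool z_erofs_pcluster_is_chain_end(z_erofs_next_pcluster_t next)
{
	return next == Z_EROFS_PCLUSTER_NIL ||
	       next == Z_EROFS_PCLUSTER_TAIL ||
	       next == Z_EROFS_PCLUSTER_TAIL_CLOSED;
}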

struct z_erofs_decompressqueue {
	struct super_block *sb;
	/* number of bios that have not completed yet */
	atomic_t pending_bios;
	/* head of the pcluster chain to be decompressed */
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;		/* for synchronous waiters */
		struct work_struct work;	/* workqueue-based decompression */
		struct kthread_work kthread_work; /* per-CPU kthread decompression */
	} u;
};

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): the number of outstanding completions
 * needed before the page can be unlocked;
 * sub-index: 0 for a partial page, >= 1 for each full page sub-index.
 */
typedef atomic_t z_erofs_onlinepage_t;
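
/*
 * For clarity (a worked illustration derived from the helpers below): the
 * atomic value packs the waiter count into the low
 * Z_EROFS_ONLINEPAGE_COUNT_BITS bits and the sub-index into the remaining
 * high bits:
 *
 *	value = (sub_index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | waiters;
 *
 * e.g. a page with sub-index 3 and 2 pending waiters is stored as
 * (3 << 2) | 2 == 14.
 */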

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}

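/*
 * Illustrative lifecycle (a sketch derived from the three helpers above,
 * not a verbatim excerpt of any caller):
 *
 *	z_erofs_onlinepage_init(page);			count = 1, page stays locked
 *	z_erofs_onlinepage_fixup(page, idx, true);	one more pending piece
 *	...
 *	z_erofs_onlinepage_endio(page);			one piece finished
 *	z_erofs_onlinepage_endio(page);			the last endio unlocks the page
 */
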
#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
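
/*
 * Usage sketch (an assumption added for illustration, not taken from this
 * header): a decompressor can keep small page vectors on the kernel stack
 * and only fall back to a heap allocation once the count exceeds
 * Z_EROFS_VMAP_ONSTACK_PAGES, e.g.:
 *
 *	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
 *	struct page **pages = nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES ?
 *			pages_onstack :
 *			kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
 */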

#endif