// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/block/zram/zram_group/zram_group.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#define pr_fmt(fmt) "[ZRAM_GROUP] " fmt

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "zram_group.h"
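
/*
 * Logging helpers: CHECK() evaluates @cond and, when it is false, prints the
 * given message and evaluates to false, so callers can bail out with
 * "if (!CHECK(...)) return;". CHECK_BOUND() is the range-check variant for
 * unsigned values.
 */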
#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
			"%s %u out of bounds %u ~ %u!\n", \
			#var, (var), (min), (max))
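
/*
 * Node index layout of the obj table:
 *   [0, nr_obj)                              per-object list nodes
 *   [nr_obj, nr_obj + nr_grp)                per-group list heads (gid 0 reserved)
 *   [nr_obj + nr_grp, nr_obj + nr_grp + nr_ext)
 *                                            per-extent object list heads
 *                                            (CONFIG_ZRAM_GROUP_WRITEBACK only)
 */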
/*
 * idx2node for obj table
 */
static struct zlist_node *get_obj(u32 index, void *private)
{
	struct zram_group *zgrp = private;

	if (index < zgrp->nr_obj)
		return &zgrp->obj[index];

	index -= zgrp->nr_obj;
	BUG_ON(!index);
	if (index < zgrp->nr_grp)
		return &zgrp->grp_obj_head[index];
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	index -= zgrp->nr_grp;
	BUG_ON(index >= zgrp->wbgrp.nr_ext);
	return &zgrp->wbgrp.ext_obj_head[index];
#endif
	BUG();
}
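
/*
 * Tear down a zram group: detach any writeback state first, then release the
 * object nodes, group list heads, list table and per-group stats. Also used
 * as the error path of zram_group_meta_alloc(), so it tolerates a NULL @zgrp.
 */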
void zram_group_meta_free(struct zram_group *zgrp)
{
	if (!CHECK(zgrp, "zram group is not enabled!\n"))
		return;

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	zram_group_remove_writeback(zgrp);
#endif
	vfree(zgrp->grp_obj_head);
	vfree(zgrp->obj);
	zlist_table_free(zgrp->obj_tab);
	vfree(zgrp->stats);
	kfree(zgrp);

	pr_info("zram group freed.\n");
}
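
/*
 * Allocate the group metadata for @nr_obj objects and @nr_grp groups. Group
 * id 0 is reserved for device-wide totals, so one extra group slot is
 * allocated internally and group list head 0 is never initialized or used.
 */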
struct zram_group *zram_group_meta_alloc(u32 nr_obj, u32 nr_grp)
{
	struct zram_group *zgrp = NULL;
	u32 i;

	if (!CHECK_BOUND(nr_grp, 1, ZGRP_MAX_GRP - 1))
		return NULL;

	/* reserve gid 0 */
	nr_grp++;
	if (!CHECK_BOUND(nr_obj, 1, ZGRP_MAX_OBJ))
		return NULL;
	zgrp = kzalloc(sizeof(struct zram_group), GFP_KERNEL);
	if (!zgrp)
		goto err;
	zgrp->nr_obj = nr_obj;
	zgrp->nr_grp = nr_grp;
	zgrp->grp_obj_head = vmalloc(sizeof(struct zlist_node) * zgrp->nr_grp);
	if (!zgrp->grp_obj_head)
		goto err;
	zgrp->obj = vmalloc(sizeof(struct zlist_node) * zgrp->nr_obj);
	if (!zgrp->obj)
		goto err;
	zgrp->obj_tab = zlist_table_alloc(get_obj, zgrp, GFP_KERNEL);
	if (!zgrp->obj_tab)
		goto err;
	zgrp->stats = vzalloc(sizeof(struct zram_group_stats) * zgrp->nr_grp);
	if (!zgrp->stats)
		goto err;
	zgrp->gsdev = NULL;

	for (i = 0; i < zgrp->nr_obj; i++)
		zlist_node_init(i, zgrp->obj_tab);
	for (i = 1; i < zgrp->nr_grp; i++)
		zlist_node_init(i + zgrp->nr_obj, zgrp->obj_tab);

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	zgrp->wbgrp.enable = false;
	mutex_init(&zgrp->wbgrp.init_lock);
#endif
	pr_info("zram group allocation succeeded.\n");
	return zgrp;
err:
	pr_err("zram group allocation failed!\n");
	zram_group_meta_free(zgrp);

	return NULL;
}
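
/*
 * Per-group LRU-style object lists: the list head for group @gid lives at
 * node index @gid + nr_obj in the obj table. zlist_add() inserts at the head
 * (hottest), zlist_add_tail() at the tail (coldest), and isolation drains
 * from the tail.
 */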
/*
 * insert obj at @index into group @gid as the HOTTEST obj
 */
void zgrp_obj_insert(struct zram_group *zgrp, u32 index, u16 gid)
{
	u32 hid;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
		return;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;
	hid = gid + zgrp->nr_obj;
	zlist_add(hid, index, zgrp->obj_tab);
	pr_debug("insert obj %u to group %u\n", index, gid);
}

/*
 * remove obj at @index from group @gid
 */
bool zgrp_obj_delete(struct zram_group *zgrp, u32 index, u16 gid)
{
	u32 hid;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return false;
	}
	if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
		return false;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return false;
	pr_debug("delete obj %u from group %u\n", index, gid);
	hid = gid + zgrp->nr_obj;

	return zlist_del(hid, index, zgrp->obj_tab);
}
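
/*
 * A typical consumer isolates a batch of objects, operates on them, and
 * either drops them or puts survivors back as the coldest entries. A minimal
 * sketch (the array size and the process() helper are illustrative only, not
 * part of this file):
 *
 *	u32 idxs[32];
 *	bool last;
 *	u32 i, n = zgrp_isolate_objs(zgrp, gid, idxs, ARRAY_SIZE(idxs), &last);
 *
 *	for (i = 0; i < n; i++)
 *		if (!process(idxs[i]))
 *			zgrp_obj_putback(zgrp, idxs[i], gid);
 */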
/*
 * try to isolate the last @nr objs of @gid, store their indexes in array
 * @idxs and return the number of objs actually isolated. Isolate all objs
 * if @nr is 0.
 */
u32 zgrp_isolate_objs(struct zram_group *zgrp, u16 gid, u32 *idxs, u32 nr, bool *last)
{
	u32 hid, idx;
	u32 cnt = 0;
	u32 i;

	if (last)
		*last = false;
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return 0;
	if (!CHECK(idxs, "return array idxs is null!\n"))
		return 0;
	hid = gid + zgrp->nr_obj;
	zlist_lock(hid, zgrp->obj_tab);
	zlist_for_each_entry_reverse(idx, hid, zgrp->obj_tab) {
		idxs[cnt++] = idx;
		if (nr && cnt == nr)
			break;
	}
	for (i = 0; i < cnt; i++)
		zlist_del_nolock(hid, idxs[i], zgrp->obj_tab);
	if (last)
		*last = cnt && zlist_is_isolated_nolock(hid, zgrp->obj_tab);
	zlist_unlock(hid, zgrp->obj_tab);

	pr_debug("isolated %u objs from group %u.\n", cnt, gid);

	return cnt;
}

/*
 * check if the obj at @index is isolated from zram groups
 */
bool zgrp_obj_is_isolated(struct zram_group *zgrp, u32 index)
{
	bool ret = false;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return false;
	}
	if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
		return false;

	zlist_lock(index, zgrp->obj_tab);
	ret = zlist_is_isolated_nolock(index, zgrp->obj_tab);
	zlist_unlock(index, zgrp->obj_tab);

	return ret;
}

/*
 * insert obj at @index into group @gid as the COLDEST obj
 */
void zgrp_obj_putback(struct zram_group *zgrp, u32 index, u16 gid)
{
	u32 hid;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
		return;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;
	hid = gid + zgrp->nr_obj;
	zlist_add_tail(hid, index, zgrp->obj_tab);
	pr_debug("putback obj %u to group %u\n", index, gid);
}
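
/*
 * Per-group accounting: every update is applied both to the group's own slot
 * and to slot 0, so stats[0] always holds the device-wide totals.
 */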
void zgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 size)
{
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;

	atomic_inc(&zgrp->stats[gid].zram_pages);
	atomic64_add(size, &zgrp->stats[gid].zram_size);
	atomic_inc(&zgrp->stats[0].zram_pages);
	atomic64_add(size, &zgrp->stats[0].zram_size);
}

void zgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 size)
{
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;

	atomic_dec(&zgrp->stats[gid].zram_pages);
	atomic64_sub(size, &zgrp->stats[gid].zram_size);
	atomic_dec(&zgrp->stats[0].zram_pages);
	atomic64_sub(size, &zgrp->stats[0].zram_size);
}

void zgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 size)
{
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;

	atomic64_inc(&zgrp->stats[gid].zram_fault);
	atomic64_inc(&zgrp->stats[0].zram_fault);
}

#ifdef CONFIG_ZRAM_GROUP_DEBUG
void zram_group_dump(struct zram_group *zgrp, u16 gid, u32 index)
{
	u32 hid, idx;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	hid = gid + zgrp->nr_obj;
	if (gid == 0) {
		struct zlist_node *node = NULL;

		if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
			return;
		node = idx2node(index, zgrp->obj_tab);
		pr_err("dump index %u = %u %u %u %u\n", index,
				node->prev, node->next,
				node->lock, node->priv);
	} else {
		if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
			return;
		pr_err("dump index of group %u\n", gid);
		zlist_for_each_entry(idx, hid, zgrp->obj_tab)
			pr_err("%u\n", idx);
	}
}
#endif

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
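/*
 * Node index layout of the ext table:
 *   [0, nr_ext)                 per-extent list nodes
 *   [nr_ext, nr_ext + nr_grp)   per-group extent list heads (gid 0 reserved)
 */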
/*
 * idx2node for ext table
 */
static struct zlist_node *get_ext(u32 index, void *private)
{
	struct zram_group *zgrp = private;

	if (index < zgrp->wbgrp.nr_ext)
		return &zgrp->wbgrp.ext[index];

	index -= zgrp->wbgrp.nr_ext;
	BUG_ON(!index);
	return &zgrp->wbgrp.grp_ext_head[index];
}

/*
 * disable writeback for zram group @zgrp
 */
void zram_group_remove_writeback(struct zram_group *zgrp)
{
	if (!CHECK(zgrp, "zram group is not enabled!\n"))
		return;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return;
	zgrp->wbgrp.enable = false;
	/* clear the pointers so a later failed re-enable cannot free them twice */
	vfree(zgrp->wbgrp.grp_ext_head);
	zgrp->wbgrp.grp_ext_head = NULL;
	vfree(zgrp->wbgrp.ext);
	zgrp->wbgrp.ext = NULL;
	zlist_table_free(zgrp->wbgrp.ext_tab);
	zgrp->wbgrp.ext_tab = NULL;
	vfree(zgrp->wbgrp.ext_obj_head);
	zgrp->wbgrp.ext_obj_head = NULL;
	pr_info("zram group writeback is removed.\n");
}

/*
 * init & enable writeback on an existing zram group @zgrp with a backing
 * device of @nr_ext extents.
 */
int zram_group_apply_writeback(struct zram_group *zgrp, u32 nr_ext)
{
	struct writeback_group *wbgrp = NULL;
	u32 i;
	int ret = 0;

	if (!CHECK(zgrp, "zram group is not enabled!\n"))
		return -EINVAL;

	mutex_lock(&zgrp->wbgrp.init_lock);
	if (!CHECK(!zgrp->wbgrp.enable, "zram group writeback is already enabled!\n"))
		goto out;
	if (!CHECK_BOUND(nr_ext, 1, ZGRP_MAX_EXT)) {
		ret = -EINVAL;
		goto out;
	}
	wbgrp = &zgrp->wbgrp;
	wbgrp->nr_ext = nr_ext;
	wbgrp->grp_ext_head = vmalloc(sizeof(struct zlist_node) * zgrp->nr_grp);
	if (!wbgrp->grp_ext_head) {
		ret = -ENOMEM;
		goto out;
	}
	wbgrp->ext = vmalloc(sizeof(struct zlist_node) * wbgrp->nr_ext);
	if (!wbgrp->ext) {
		ret = -ENOMEM;
		goto out;
	}
	wbgrp->ext_obj_head = vmalloc(sizeof(struct zlist_node) * wbgrp->nr_ext);
	if (!wbgrp->ext_obj_head) {
		ret = -ENOMEM;
		goto out;
	}

	wbgrp->ext_tab = zlist_table_alloc(get_ext, zgrp, GFP_KERNEL);
	if (!wbgrp->ext_tab) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < wbgrp->nr_ext; i++)
		zlist_node_init(i, wbgrp->ext_tab);
	for (i = 1; i < zgrp->nr_grp; i++)
		zlist_node_init(i + wbgrp->nr_ext, wbgrp->ext_tab);

	for (i = 0; i < wbgrp->nr_ext; i++)
		zlist_node_init(i + zgrp->nr_obj + zgrp->nr_grp, zgrp->obj_tab);

	init_waitqueue_head(&wbgrp->fault_wq);
	wbgrp->enable = true;
	pr_info("zram group writeback is enabled.\n");
out:
	if (ret) {
		/*
		 * wbgrp.enable was never set, so zram_group_remove_writeback()
		 * would bail out without releasing the partial allocations;
		 * free them here instead, still under init_lock.
		 */
		vfree(zgrp->wbgrp.grp_ext_head);
		zgrp->wbgrp.grp_ext_head = NULL;
		vfree(zgrp->wbgrp.ext);
		zgrp->wbgrp.ext = NULL;
		vfree(zgrp->wbgrp.ext_obj_head);
		zgrp->wbgrp.ext_obj_head = NULL;
		pr_err("zram group writeback enable failed!\n");
	}
	mutex_unlock(&zgrp->wbgrp.init_lock);

	return ret;
}
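
/*
 * Writeback is layered on top of an existing group: the metadata is allocated
 * first and writeback is applied once the size of the backing device is
 * known. A minimal sketch (the sizes are illustrative only):
 *
 *	struct zram_group *zgrp = zram_group_meta_alloc(nr_pages, nr_groups);
 *
 *	if (zgrp && zram_group_apply_writeback(zgrp, nr_extents))
 *		pr_err("writeback setup failed\n");
 */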
/*
 * attach extent at @eid to group @gid as the HOTTEST extent
 */
void zgrp_ext_insert(struct zram_group *zgrp, u32 eid, u16 gid)
{
	u32 hid;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;
	hid = gid + zgrp->wbgrp.nr_ext;
	zlist_add(hid, eid, zgrp->wbgrp.ext_tab);
	pr_debug("insert extent %u to group %u\n", eid, gid);
}

/*
 * remove extent at @eid from group @gid
 */
bool zgrp_ext_delete(struct zram_group *zgrp, u32 eid, u16 gid)
{
	u32 hid;
	bool isolated = false;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return false;
	}
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return false;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return false;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return false;

	zlist_lock(eid, zgrp->wbgrp.ext_tab);
	isolated = zlist_is_isolated_nolock(eid, zgrp->wbgrp.ext_tab);
	zlist_unlock(eid, zgrp->wbgrp.ext_tab);
	if (isolated) {
		pr_debug("extent %u is already isolated, skip delete.\n", eid);
		return false;
	}

	pr_debug("delete extent %u from group %u\n", eid, gid);
	hid = gid + zgrp->wbgrp.nr_ext;
	return zlist_del(hid, eid, zgrp->wbgrp.ext_tab);
}

/*
 * try to isolate the last @nr exts of @gid, store their eids in array @eids
 * and return the number of exts actually isolated. Isolate all exts if @nr
 * is 0.
 */
u32 zgrp_isolate_exts(struct zram_group *zgrp, u16 gid, u32 *eids, u32 nr, bool *last)
{
	u32 hid, idx;
	u32 cnt = 0;
	u32 i;

	if (last)
		*last = false;
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return 0;
	if (!CHECK(eids, "return array eids is null!\n"))
		return 0;
	hid = gid + zgrp->wbgrp.nr_ext;
	zlist_lock(hid, zgrp->wbgrp.ext_tab);
	zlist_for_each_entry_reverse(idx, hid, zgrp->wbgrp.ext_tab) {
		eids[cnt++] = idx;
		if (nr && cnt == nr)
			break;
	}
	for (i = 0; i < cnt; i++)
		zlist_del_nolock(hid, eids[i], zgrp->wbgrp.ext_tab);
	if (last)
		*last = cnt && zlist_is_isolated_nolock(hid, zgrp->wbgrp.ext_tab);
	zlist_unlock(hid, zgrp->wbgrp.ext_tab);

	pr_debug("isolated %u exts from group %u.\n", cnt, gid);

	return cnt;
}
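
/*
 * The priv bit on an extent's object list head is used as a busy marker:
 * zgrp_get_ext() sets it while the extent is in use and zgrp_put_ext()
 * clears it and reports whether the extent's object list is now empty; the
 * delete and isolate paths below only report completion when the bit is
 * clear.
 */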
void zgrp_get_ext(struct zram_group *zgrp, u32 eid)
{
	u32 hid;

	if (!CHECK(zgrp, "zram group is not enabled!\n"))
		return;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	hid = eid + zgrp->nr_obj + zgrp->nr_grp;
	zlist_set_priv(hid, zgrp->obj_tab);
	pr_info("get extent %u\n", eid);
}

bool zgrp_put_ext(struct zram_group *zgrp, u32 eid)
{
	u32 hid;
	bool ret = false;

	if (!CHECK(zgrp, "zram group is not enabled!\n"))
		return false;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return false;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return false;

	hid = eid + zgrp->nr_obj + zgrp->nr_grp;
	zlist_lock(hid, zgrp->obj_tab);
	zlist_clr_priv_nolock(hid, zgrp->obj_tab);
	ret = zlist_is_isolated_nolock(hid, zgrp->obj_tab);
	zlist_unlock(hid, zgrp->obj_tab);

	pr_info("put extent %u, ret = %d\n", eid, ret);

	return ret;
}

/*
 * insert obj at @index into extent @eid
 */
void wbgrp_obj_insert(struct zram_group *zgrp, u32 index, u32 eid)
{
	u32 hid;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return;
	if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
		return;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;
	hid = eid + zgrp->nr_obj + zgrp->nr_grp;
	zlist_add_tail(hid, index, zgrp->obj_tab);
	pr_debug("insert obj %u to extent %u\n", index, eid);
}

/*
 * remove obj at @index from extent @eid
 */
bool wbgrp_obj_delete(struct zram_group *zgrp, u32 index, u32 eid)
{
	u32 hid;
	bool ret = false;

	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return false;
	}
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return false;
	if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
		return false;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return false;
	pr_debug("delete obj %u from extent %u\n", index, eid);
	hid = eid + zgrp->nr_obj + zgrp->nr_grp;

	zlist_lock(hid, zgrp->obj_tab);
	ret = zlist_del_nolock(hid, index, zgrp->obj_tab)
		&& !zlist_test_priv_nolock(hid, zgrp->obj_tab);
	zlist_unlock(hid, zgrp->obj_tab);

	return ret;
}
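
/*
 * Unlike zgrp_isolate_objs(), which drains a group from its coldest end, the
 * writeback variant below walks the extent's object list from the head, and
 * only reports @last when the list is empty and the extent is not held busy
 * (see zgrp_get_ext()/zgrp_put_ext()).
 */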
/*
 * try to isolate the first @nr writeback objs of @eid, store their indexes in
 * array @idxs and return the number of objs actually isolated. Isolate all
 * objs if @nr is 0.
 */
u32 wbgrp_isolate_objs(struct zram_group *zgrp, u32 eid, u32 *idxs, u32 nr, bool *last)
{
	u32 hid, idx;
	u32 cnt = 0;
	u32 i;

	if (last)
		*last = false;
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return 0;
	if (!CHECK(idxs, "return array idxs is null!\n"))
		return 0;
	hid = eid + zgrp->nr_obj + zgrp->nr_grp;
	zlist_lock(hid, zgrp->obj_tab);
	zlist_for_each_entry(idx, hid, zgrp->obj_tab) {
		idxs[cnt++] = idx;
		if (nr && cnt == nr)
			break;
	}
	for (i = 0; i < cnt; i++)
		zlist_del_nolock(hid, idxs[i], zgrp->obj_tab);
	if (last)
		*last = cnt && zlist_is_isolated_nolock(hid, zgrp->obj_tab)
			&& !zlist_test_priv_nolock(hid, zgrp->obj_tab);
	zlist_unlock(hid, zgrp->obj_tab);

	pr_debug("isolated %u objs from extent %u.\n", cnt, eid);

	return cnt;
}

void wbgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size)
{
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	atomic_inc(&zgrp->stats[gid].wb_pages);
	atomic64_add(size, &zgrp->stats[gid].wb_size);
	atomic_inc(&zgrp->stats[0].wb_pages);
	atomic64_add(size, &zgrp->stats[0].wb_size);
}

void wbgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 eid, u32 size)
{
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	atomic_dec(&zgrp->stats[gid].wb_pages);
	atomic64_sub(size, &zgrp->stats[gid].wb_size);
	atomic_dec(&zgrp->stats[0].wb_pages);
	atomic64_sub(size, &zgrp->stats[0].wb_size);
}

void wbgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size)
{
	if (!zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	atomic64_inc(&zgrp->stats[gid].wb_fault);
	atomic64_inc(&zgrp->stats[0].wb_fault);
}
#endif