1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/block/zram/zram_group/zram_group.c
4 *
5 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
6 */
7
8 #define pr_fmt(fmt) "[ZRAM_GROUP]" fmt
9
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12 #include "zram_group.h"
13
/*
 * CHECK(cond, ...): evaluates to true when @cond holds; otherwise logs the
 * printf-style message via pr_err() and evaluates to false.
 *
 * CHECK_BOUND(var, min, max): range check [min, max] with a diagnostic that
 * stringifies @var.  NOTE: @var, @min and @max are expanded more than once,
 * so arguments must be free of side effects (all call sites in this file
 * pass plain variables/constants).  Format specifiers assume u32 values.
 */
#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
		"%s %u out of bounds %u ~ %u!\n", \
		#var, (var), (min), (max))
19
/*
 * idx2node callback for the obj table: maps a flat node index onto the
 * backing arrays.  Index layout (established by zram_group_meta_alloc()
 * and zram_group_apply_writeback()):
 *   [0, nr_obj)                          -> per-object list nodes
 *   [nr_obj, nr_obj + nr_grp)            -> per-group obj list heads
 *                                           (offset 0 == gid 0 is reserved)
 *   [nr_obj + nr_grp, ... + wbgrp.nr_ext) -> per-extent obj list heads
 *                                           (writeback only)
 */
static struct zlist_node *get_obj(u32 index, void *private)
{
	struct zram_group *zgrp = private;

	if (index < zgrp->nr_obj)
		return &zgrp->obj[index];

	index -= zgrp->nr_obj;
	/* gid 0 is reserved, so a zero head offset is a caller bug */
	BUG_ON(!index);
	if (index < zgrp->nr_grp)
		return &zgrp->grp_obj_head[index];
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	index -= zgrp->nr_grp;
	BUG_ON(index >= zgrp->wbgrp.nr_ext);
	return &zgrp->wbgrp.ext_obj_head[index];
#endif
	/* only reachable when writeback is compiled out: index out of range */
	BUG();
}
41
/*
 * Tear down all metadata owned by @zgrp and free the group itself.
 *
 * Also called on the zram_group_meta_alloc() error path with a partially
 * initialized group, so members may still be NULL: vfree(NULL)/kfree(NULL)
 * are no-ops, and zlist_table_free() is presumably NULL-safe as well —
 * NOTE(review): verify against its implementation.
 */
void zram_group_meta_free(struct zram_group *zgrp)
{
	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return;

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	/* no-op unless writeback was actually enabled */
	zram_group_remove_writeback(zgrp);
#endif
	vfree(zgrp->grp_obj_head);
	vfree(zgrp->obj);
	zlist_table_free(zgrp->obj_tab);
	vfree(zgrp->stats);
	kfree(zgrp);

	pr_info("zram group freed.\n");
}
58
/*
 * Allocate and initialize zram group metadata.
 * @nr_obj: number of trackable objects, 1 ~ ZGRP_MAX_OBJ
 * @nr_grp: number of user-visible groups, 1 ~ ZGRP_MAX_GRP - 1; one extra
 *          slot is reserved internally for gid 0, which aggregates the
 *          stats of all groups
 *
 * @return the new group on success, NULL on failure.
 */
struct zram_group *zram_group_meta_alloc(u32 nr_obj, u32 nr_grp)
{
	struct zram_group *zgrp = NULL;
	u32 i;

	if (!CHECK_BOUND(nr_grp, 1, ZGRP_MAX_GRP - 1))
		return NULL;

	/* reserve gid 0 */
	nr_grp++;
	if (!CHECK_BOUND(nr_obj, 1, ZGRP_MAX_OBJ))
		return NULL;
	/* kzalloc so all members are NULL-safe to free on the error path */
	zgrp = kzalloc(sizeof(struct zram_group), GFP_KERNEL);
	if (!zgrp)
		goto err;
	zgrp->nr_obj = nr_obj;
	zgrp->nr_grp = nr_grp;
	zgrp->grp_obj_head = vmalloc(sizeof(struct zlist_node) * zgrp->nr_grp);
	if (!zgrp->grp_obj_head)
		goto err;
	zgrp->obj = vmalloc(sizeof(struct zlist_node) * zgrp->nr_obj);
	if (!zgrp->obj)
		goto err;
	zgrp->obj_tab = zlist_table_alloc(get_obj, zgrp, GFP_KERNEL);
	if (!zgrp->obj_tab)
		goto err;
	zgrp->stats = vzalloc(sizeof(struct zram_group_stats) * zgrp->nr_grp);
	if (!zgrp->stats)
		goto err;
	zgrp->gsdev = NULL;

	/* per-object nodes occupy [0, nr_obj) of the obj table ... */
	for (i = 0; i < zgrp->nr_obj; i++)
		zlist_node_init(i, zgrp->obj_tab);
	/* ... group list heads follow; head for gid 0 is never used */
	for (i = 1; i < zgrp->nr_grp; i++)
		zlist_node_init(i + zgrp->nr_obj, zgrp->obj_tab);

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	zgrp->wbgrp.enable = false;
	mutex_init(&zgrp->wbgrp.init_lock);
#endif
	pr_info("zram_group alloc succ.\n");
	return zgrp;
err:
	pr_err("zram_group alloc failed!\n");
	/* frees whatever was allocated; tolerates zgrp == NULL */
	zram_group_meta_free(zgrp);

	return NULL;
}
107
108 /*
109 * insert obj at @index into group @gid as the HOTTEST obj
110 */
zgrp_obj_insert(struct zram_group * zgrp,u32 index,u16 gid)111 void zgrp_obj_insert(struct zram_group *zgrp, u32 index, u16 gid)
112 {
113 u32 hid;
114
115 if (!CHECK(zgrp, "zram group is not enable!\n"))
116 return;
117 if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
118 return;
119 if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
120 return;
121 hid = gid + zgrp->nr_obj;
122 zlist_add(hid, index, zgrp->obj_tab);
123 pr_info("insert obj %u to group %u\n", index, gid);
124 }
125
126 /*
127 * remove obj at @index from group @gid
128 */
zgrp_obj_delete(struct zram_group * zgrp,u32 index,u16 gid)129 bool zgrp_obj_delete(struct zram_group *zgrp, u32 index, u16 gid)
130 {
131 u32 hid;
132
133 if (!CHECK(zgrp, "zram group is not enable!\n"))
134 return false;
135 if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
136 return false;
137 if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
138 return false;
139 pr_info("delete obj %u from group %u\n", index, gid);
140 hid = gid + zgrp->nr_obj;
141
142 return zlist_del(hid, index, zgrp->obj_tab);
143 }
144
/*
 * try to isolate the last @nr objs of @gid, store their indexes in array @idxs
 * and @return the obj cnt actually isolated. isolate all objs if nr is 0.
 *
 * @last, if non-NULL, is set to true when the group list became empty as a
 * result of this isolation.  The collect + unlink phases both run under the
 * head lock, so the isolated set is consistent.
 */
u32 zgrp_isolate_objs(struct zram_group *zgrp, u16 gid, u32 *idxs, u32 nr, bool *last)
{
	u32 hid, idx;
	u32 cnt = 0;
	u32 i;

	if (last)
		*last = false;
	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return 0;
	if (!CHECK(idxs, "return array idxs is null!\n"))
		return 0;
	hid = gid + zgrp->nr_obj;
	zlist_lock(hid, zgrp->obj_tab);
	/* walk from the cold (tail) end, collecting up to @nr indexes */
	zlist_for_each_entry_reverse(idx, hid, zgrp->obj_tab) {
		idxs[cnt++] = idx;
		if (nr && cnt == nr)
			break;
	}
	/* unlink the collected objs while still holding the head lock */
	for (i = 0; i < cnt; i++)
		zlist_del_nolock(hid, idxs[i], zgrp->obj_tab);
	if (last)
		*last = cnt && zlist_is_isolated_nolock(hid, zgrp->obj_tab);
	zlist_unlock(hid, zgrp->obj_tab);

	pr_info("isolated %u objs from group %u.\n", cnt, gid);

	return cnt;
}
180
181 /*
182 * check if the obj at @index is isolate from zram groups
183 */
zgrp_obj_is_isolated(struct zram_group * zgrp,u32 index)184 bool zgrp_obj_is_isolated(struct zram_group *zgrp, u32 index)
185 {
186 bool ret = false;
187
188 if (!CHECK(zgrp, "zram group is not enable!\n"))
189 return false;
190 if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
191 return false;
192
193 zlist_lock(index, zgrp->obj_tab);
194 ret = zlist_is_isolated_nolock(index, zgrp->obj_tab);
195 zlist_unlock(index, zgrp->obj_tab);
196
197 return ret;
198 }
199 /*
200 * insert obj at @index into group @gid as the COLDEST obj
201 */
zgrp_obj_putback(struct zram_group * zgrp,u32 index,u16 gid)202 void zgrp_obj_putback(struct zram_group *zgrp, u32 index, u16 gid)
203 {
204 u32 hid;
205
206 if (!CHECK(zgrp, "zram group is not enable!\n"))
207 return;
208 if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
209 return;
210 if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
211 return;
212 hid = gid + zgrp->nr_obj;
213 zlist_add_tail(hid, index, zgrp->obj_tab);
214 pr_info("putback obj %u to group %u\n", index, gid);
215 }
216
/*
 * Account one compressed page of @size bytes to group @gid and to the
 * global aggregate (gid 0).
 */
void zgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 size)
{
	struct zram_group_stats *grp;
	struct zram_group_stats *all;

	if (!CHECK(zgrp, "zram group is not enable!\n") ||
	    !CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;

	grp = &zgrp->stats[gid];
	all = &zgrp->stats[0];
	atomic_inc(&grp->zram_pages);
	atomic64_add(size, &grp->zram_size);
	atomic_inc(&all->zram_pages);
	atomic64_add(size, &all->zram_size);
}
229
/*
 * Un-account one compressed page of @size bytes from group @gid and from
 * the global aggregate (gid 0).
 */
void zgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 size)
{
	struct zram_group_stats *grp;
	struct zram_group_stats *all;

	if (!CHECK(zgrp, "zram group is not enable!\n") ||
	    !CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;

	grp = &zgrp->stats[gid];
	all = &zgrp->stats[0];
	atomic_dec(&grp->zram_pages);
	atomic64_sub(size, &grp->zram_size);
	atomic_dec(&all->zram_pages);
	atomic64_sub(size, &all->zram_size);
}
242
/*
 * Count one zram fault against group @gid and the global aggregate.
 * NOTE(review): @size is unused here; presumably kept for signature
 * symmetry with the other stats helpers — confirm with callers.
 */
void zgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 size)
{
	if (!CHECK(zgrp, "zram group is not enable!\n") ||
	    !CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return;

	atomic64_inc(&zgrp->stats[gid].zram_fault);
	atomic64_inc(&zgrp->stats[0].zram_fault);
}
253
#ifdef CONFIG_ZRAM_GROUP_DEBUG
/*
 * Debug helper: with @gid == 0, dump the raw zlist node of the object at
 * @index; otherwise dump all obj indexes linked into group @gid.
 */
void zram_group_dump(struct zram_group *zgrp, u16 gid, u32 index)
{
	u32 hid, idx;

	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return;
	/* hid is only meaningful (and only used) in the gid != 0 branch */
	hid = gid + zgrp->nr_obj;
	if (gid == 0) {
		struct zlist_node *node = NULL;

		if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
			return;
		node = idx2node(index, zgrp->obj_tab);
		pr_err("dump index %u = %u %u %u %u\n", index,
				node->prev, node->next,
				node->lock, node->priv);
	} else {
		if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
			return;
		pr_err("dump index of group %u\n", gid);
		zlist_for_each_entry(idx, hid, zgrp->obj_tab)
			pr_err("%u\n", idx);
	}
}
#endif
280
281 #ifdef CONFIG_ZRAM_GROUP_WRITEBACK
282 /*
283 * idx2node for ext table
284 */
get_ext(u32 index,void * private)285 static struct zlist_node *get_ext(u32 index, void *private)
286 {
287 struct zram_group *zgrp = private;
288
289 if (index < zgrp->wbgrp.nr_ext)
290 return &zgrp->wbgrp.ext[index];
291
292 index -= zgrp->wbgrp.nr_ext;
293 BUG_ON(!index);
294 return &zgrp->wbgrp.grp_ext_head[index];
295 }
296
/*
 * disable writeback for zram group @zgrp
 *
 * Frees all writeback metadata and clears the enable flag.  NOTE(review):
 * this is a no-op unless wbgrp.enable is set, so it cannot be used to
 * clean up a partially initialized (never-enabled) writeback group.
 */
void zram_group_remove_writeback(struct zram_group *zgrp)
{
	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
		return;
	/* clear the flag first so the group reads as writeback-disabled */
	zgrp->wbgrp.enable = false;
	vfree(zgrp->wbgrp.grp_ext_head);
	vfree(zgrp->wbgrp.ext);
	zlist_table_free(zgrp->wbgrp.ext_tab);
	vfree(zgrp->wbgrp.ext_obj_head);
	pr_info("zram group writeback is removed.\n");
}
313
314 /*
315 * init & enable writeback on exist zram group @zgrp with a backing device of
316 * @nr_ext extents.
317 */
zram_group_apply_writeback(struct zram_group * zgrp,u32 nr_ext)318 int zram_group_apply_writeback(struct zram_group *zgrp, u32 nr_ext)
319 {
320 struct writeback_group *wbgrp = NULL;
321 u32 i;
322 int ret = 0;
323
324 if (!CHECK(zgrp, "zram group is not enable!\n"))
325 return -EINVAL;
326
327 mutex_lock(&zgrp->wbgrp.init_lock);
328 if (!CHECK(!zgrp->wbgrp.enable, "zram group writeback is already enable!\n"))
329 goto out;
330 if (!CHECK_BOUND(nr_ext, 1, ZGRP_MAX_EXT)) {
331 ret = -EINVAL;
332 goto out;
333 }
334 wbgrp = &zgrp->wbgrp;
335 wbgrp->nr_ext = nr_ext;
336 wbgrp->grp_ext_head = vmalloc(sizeof(struct zlist_node) * zgrp->nr_grp);
337 if (!wbgrp->grp_ext_head) {
338 ret = -ENOMEM;
339 goto out;
340 }
341 wbgrp->ext = vmalloc(sizeof(struct zlist_node) * wbgrp->nr_ext);
342 if (!wbgrp->ext) {
343 ret = -ENOMEM;
344 goto out;
345 }
346 wbgrp->ext_obj_head = vmalloc(sizeof(struct zlist_node) * wbgrp->nr_ext);
347 if (!wbgrp->ext_obj_head) {
348 ret = -ENOMEM;
349 goto out;
350 }
351
352 wbgrp->ext_tab = zlist_table_alloc(get_ext, zgrp, GFP_KERNEL);
353 if (!wbgrp->ext_tab) {
354 ret = -ENOMEM;
355 goto out;
356 }
357
358 for (i = 0; i < wbgrp->nr_ext; i++)
359 zlist_node_init(i, wbgrp->ext_tab);
360 for (i = 1; i < zgrp->nr_grp; i++)
361 zlist_node_init(i + wbgrp->nr_ext, wbgrp->ext_tab);
362
363 for (i = 0; i < wbgrp->nr_ext; i++)
364 zlist_node_init(i + zgrp->nr_obj + zgrp->nr_grp, zgrp->obj_tab);
365
366 init_waitqueue_head(&wbgrp->fault_wq);
367 wbgrp->enable = true;
368 pr_info("zram group writeback is enabled.\n");
369 out:
370 mutex_unlock(&zgrp->wbgrp.init_lock);
371
372 if (ret) {
373 zram_group_remove_writeback(zgrp);
374 pr_err("zram group writeback enable failed!\n");
375 }
376
377 return ret;
378 }
379
380 /*
381 * attach extent at @eid to group @gid as the HOTTEST extent
382 */
zgrp_ext_insert(struct zram_group * zgrp,u32 eid,u16 gid)383 void zgrp_ext_insert(struct zram_group *zgrp, u32 eid, u16 gid)
384 {
385 u32 hid;
386
387 if (!CHECK(zgrp, "zram group is not enable!\n"))
388 return;
389 if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
390 return;
391 if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
392 return;
393 if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
394 return;
395 hid = gid + zgrp->wbgrp.nr_ext;
396 zlist_add(hid, eid, zgrp->wbgrp.ext_tab);
397 pr_info("insert extent %u to group %u\n", eid, gid);
398 }
399
/*
 * remove extent at @eid from group @gid
 * @return true when the extent was linked and has been removed.
 *
 * NOTE(review): the isolated pre-check drops the node lock before
 * zlist_del() re-acquires it, so another task could relink the extent in
 * that window — treat the check as best-effort, not a guarantee.
 */
bool zgrp_ext_delete(struct zram_group *zgrp, u32 eid, u16 gid)
{
	u32 hid;
	bool isolated = false;

	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return false;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
		return false;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return false;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return false;

	/* skip extents that are not on any list */
	zlist_lock(eid, zgrp->wbgrp.ext_tab);
	isolated = zlist_is_isolated_nolock(eid, zgrp->wbgrp.ext_tab);
	zlist_unlock(eid, zgrp->wbgrp.ext_tab);
	if (isolated) {
		pr_info("extent %u is already isolated, skip delete.\n", eid);
		return false;
	}

	pr_info("delete extent %u from group %u\n", eid, gid);
	hid = gid + zgrp->wbgrp.nr_ext;
	return zlist_del(hid, eid, zgrp->wbgrp.ext_tab);
}
429
/*
 * try to isolate up to @nr exts of @gid from the cold (tail) end, store
 * their eids in array @eids and @return the cnt actually isolated.
 * isolate all exts if nr is 0.  (The walk below is reverse, i.e. tail
 * first, matching zgrp_isolate_objs().)
 *
 * @last, if non-NULL, is set to true when the group's extent list became
 * empty as a result of this isolation.
 */
u32 zgrp_isolate_exts(struct zram_group *zgrp, u16 gid, u32 *eids, u32 nr, bool *last)
{
	u32 hid, idx;
	u32 cnt = 0;
	u32 i;

	if (last)
		*last = false;
	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return 0;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1))
		return 0;
	if (!CHECK(eids, "return array eids is null!\n"))
		return 0;
	hid = gid + zgrp->wbgrp.nr_ext;
	zlist_lock(hid, zgrp->wbgrp.ext_tab);
	/* collect from the tail, then unlink under the same head lock */
	zlist_for_each_entry_reverse(idx, hid, zgrp->wbgrp.ext_tab) {
		eids[cnt++] = idx;
		if (nr && cnt == nr)
			break;
	}
	for (i = 0; i < cnt; i++)
		zlist_del_nolock(hid, eids[i], zgrp->wbgrp.ext_tab);
	if (last)
		*last = cnt && zlist_is_isolated_nolock(hid, zgrp->wbgrp.ext_tab);
	zlist_unlock(hid, zgrp->wbgrp.ext_tab);

	pr_info("isolated %u exts from group %u.\n", cnt, gid);

	return cnt;
}
467
468 /*
469 * insert obj at @index into extent @eid
470 */
wbgrp_obj_insert(struct zram_group * zgrp,u32 index,u32 eid)471 void wbgrp_obj_insert(struct zram_group *zgrp, u32 index, u32 eid)
472 {
473 u32 hid;
474
475 if (!CHECK(zgrp, "zram group is not enable!\n"))
476 return;
477 if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
478 return;
479 if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
480 return;
481 if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
482 return;
483 hid = eid + zgrp->nr_obj + zgrp->nr_grp;
484 zlist_add_tail(hid, index, zgrp->obj_tab);
485 pr_info("insert obj %u to extent %u\n", index, eid);
486 }
487
488 /*
489 * remove obj at @index from extent @eid
490 */
wbgrp_obj_delete(struct zram_group * zgrp,u32 index,u32 eid)491 bool wbgrp_obj_delete(struct zram_group *zgrp, u32 index, u32 eid)
492 {
493 u32 hid;
494
495 if (!CHECK(zgrp, "zram group is not enable!\n"))
496 return false;
497 if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
498 return false;
499 if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1))
500 return false;
501 if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
502 return false;
503 pr_info("delete obj %u from extent %u\n", index, eid);
504 hid = eid + zgrp->nr_obj + zgrp->nr_grp;
505
506 return zlist_del(hid, index, zgrp->obj_tab);
507 }
508
/*
 * try to isolate the first @nr writeback objs of @eid, store their indexes in
 * array @idxs and @return the obj cnt actually isolated. isolate all objs if
 * @nr is 0.
 *
 * @last, if non-NULL, is set to true when the extent's obj list became
 * empty as a result of this isolation.  Unlike the group isolators, this
 * walks forward from the head end.
 */
u32 wbgrp_isolate_objs(struct zram_group *zgrp, u32 eid, u32 *idxs, u32 nr, bool *last)
{
	u32 hid, idx;
	u32 cnt = 0;
	u32 i;

	if (last)
		*last = false;
	if (!CHECK(zgrp, "zram group is not enable!\n"))
		return 0;
	if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n"))
		return 0;
	if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return 0;
	if (!CHECK(idxs, "return array idxs is null!\n"))
		return 0;
	hid = eid + zgrp->nr_obj + zgrp->nr_grp;
	zlist_lock(hid, zgrp->obj_tab);
	/* collect from the head, then unlink under the same head lock */
	zlist_for_each_entry(idx, hid, zgrp->obj_tab) {
		idxs[cnt++] = idx;
		if (nr && cnt == nr)
			break;
	}
	for (i = 0; i < cnt; i++)
		zlist_del_nolock(hid, idxs[i], zgrp->obj_tab);
	if (last)
		*last = cnt && zlist_is_isolated_nolock(hid, zgrp->obj_tab);
	zlist_unlock(hid, zgrp->obj_tab);

	pr_info("isolated %u objs from extent %u.\n", cnt, eid);

	return cnt;
}
547
/*
 * Account one written-back page of @size bytes to group @gid and to the
 * global aggregate (gid 0).  @eid is validated but not otherwise used.
 */
void wbgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size)
{
	struct zram_group_stats *grp;
	struct zram_group_stats *all;

	if (!CHECK(zgrp, "zram group is not enable!\n") ||
	    !CHECK_BOUND(gid, 1, zgrp->nr_grp - 1) ||
	    !CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	grp = &zgrp->stats[gid];
	all = &zgrp->stats[0];
	atomic_inc(&grp->wb_pages);
	atomic64_add(size, &grp->wb_size);
	atomic_inc(&all->wb_pages);
	atomic64_add(size, &all->wb_size);
}
562
/*
 * Un-account one written-back page of @size bytes from group @gid and
 * from the global aggregate (gid 0).  @eid is validated but not used.
 */
void wbgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 eid, u32 size)
{
	struct zram_group_stats *grp;
	struct zram_group_stats *all;

	if (!CHECK(zgrp, "zram group is not enable!\n") ||
	    !CHECK_BOUND(gid, 1, zgrp->nr_grp - 1) ||
	    !CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	grp = &zgrp->stats[gid];
	all = &zgrp->stats[0];
	atomic_dec(&grp->wb_pages);
	atomic64_sub(size, &grp->wb_size);
	atomic_dec(&all->wb_pages);
	atomic64_sub(size, &all->wb_size);
}
577
/*
 * Count one writeback fault against group @gid and the global aggregate.
 * NOTE(review): @eid is only validated and @size is unused; presumably
 * kept for signature symmetry with the other stats helpers — confirm.
 */
void wbgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size)
{
	if (!CHECK(zgrp, "zram group is not enable!\n") ||
	    !CHECK_BOUND(gid, 1, zgrp->nr_grp - 1) ||
	    !CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1))
		return;

	atomic64_inc(&zgrp->stats[gid].wb_fault);
	atomic64_inc(&zgrp->stats[0].wb_fault);
}
590 #endif
591