// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/block/zram/zram_group/group_writeback.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/blk_types.h>
#include <linux/zswapd.h>

#include "../zram_drv.h"
#include "zram_group.h"

#ifdef CONFIG_HYPERHOLD
#include "hyperhold.h"
#endif

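/*
 * CHECK() evaluates @cond and, on failure, logs the given message and
 * yields false so callers can bail out. CHECK_BOUND() is a range check
 * built on top of it.
 */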
#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
		"%s %u out of bounds %u ~ %u!\n", \
		#var, (var), (min), (max))

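/*
 * The group (memcg) id of an object is kept in the ZRAM_GRPID_MASK
 * bits of its table entry flags, above ZRAM_SIZE_SHIFT.
 */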
static u16 zram_get_memcg_id(struct zram *zram, u32 index)
{
	return (zram->table[index].flags & ZRAM_GRPID_MASK) >> ZRAM_SIZE_SHIFT;
}

static void zram_set_memcg_id(struct zram *zram, u32 index, u16 gid)
{
	unsigned long old = zram->table[index].flags & (~ZRAM_GRPID_MASK);

	zram->table[index].flags = old | ((u64)gid << ZRAM_SIZE_SHIFT);
}

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
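/*
 * Check whether the isolated object @index should be written back for
 * group @gid. Same-element objects, objects already written back and
 * objects held by a page fault need no writeback; they are put back
 * into the group as HOTTEST.
 */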
static bool obj_can_wb(struct zram *zram, u32 index, u16 gid)
{
	/* overwritten obj, just skip */
	if (zram_get_memcg_id(zram, index) != gid) {
		pr_debug("obj %u is from group %u instead of group %u.\n",
				index, zram_get_memcg_id(zram, index), gid);
		return false;
	}
	if (!zgrp_obj_is_isolated(zram->zgrp, index)) {
		pr_debug("obj %u is not isolated.\n", index);
		return false;
	}
	/* no need to write back, put the obj back as HOTTEST */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		pr_debug("obj %u is filled with the same element.\n", index);
		goto insert;
	}
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		pr_debug("obj %u is already written back.\n", index);
		goto insert;
	}
	/* obj is needed by a page fault request, do not write it back */
	if (zram_test_flag(zram, index, ZRAM_FAULT)) {
		pr_debug("obj %u is needed by a page fault request.\n", index);
		goto insert;
	}
	/* should never happen */
	if (zram_test_flag(zram, index, ZRAM_GWB)) {
		pr_debug("obj %u is under group writeback.\n", index);
		BUG();
		return false;
	}

	return true;
insert:
	zgrp_obj_insert(zram->zgrp, index, gid);

	return false;
}

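/*
 * Copy @size bytes at @offset between the hpio pages and the object
 * buffer @obj; @to selects the direction. An object may straddle two
 * hpio pages, in which case the copy is split at the page boundary.
 */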
static void copy_obj(struct hpio *hpio, u32 offset, char *obj, u32 size, bool to)
{
	u32 page_id, start;
	char *buf = NULL;

	page_id = offset / PAGE_SIZE;
	start = offset % PAGE_SIZE;
	if (size + start <= PAGE_SIZE) {
		buf = page_to_virt(hyperhold_io_page(hpio, page_id));
		if (to)
			memcpy(buf + start, obj, size);
		else
			memcpy(obj, buf + start, size);

		return;
	}
	buf = page_to_virt(hyperhold_io_page(hpio, page_id));
	if (to)
		memcpy(buf + start, obj, PAGE_SIZE - start);
	else
		memcpy(obj, buf + start, PAGE_SIZE - start);
	buf = page_to_virt(hyperhold_io_page(hpio, page_id + 1));
	if (to)
		memcpy(buf, obj + PAGE_SIZE - start, size + start - PAGE_SIZE);
	else
		memcpy(obj + PAGE_SIZE - start, buf, size + start - PAGE_SIZE);
}

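/*
 * Move the object @index of group @gid into @hpio at @offset under the
 * slot lock. Returns the object size on success, or 0 if the object
 * was skipped or the extent has not enough space left.
 */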
static u32 move_obj_to_hpio(struct zram *zram, u32 index, u16 gid,
			    struct hpio *hpio, u32 offset)
{
	u32 size = 0;
	unsigned long handle;
	char *src = NULL;
	u32 ext_size;
	u32 eid;

	eid = hyperhold_io_extent(hpio);
	ext_size = hyperhold_extent_size(eid);

	zram_slot_lock(zram, index);
	if (!obj_can_wb(zram, index, gid))
		goto unlock;
	size = zram_get_obj_size(zram, index);
	/* no space, put the obj back as COLDEST */
	if (size + offset > ext_size) {
		pr_debug("obj %u size is %u, but ext %u has only %u space left.\n",
				index, size, eid, ext_size - offset);
		zgrp_obj_putback(zram->zgrp, index, gid);
		size = 0;
		goto unlock;
	}
	handle = zram_get_handle(zram, index);
	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	copy_obj(hpio, offset, src, size, true);
	zs_unmap_object(zram->mem_pool, handle);
	zs_free(zram->mem_pool, handle);
	zram_set_handle(zram, index, hyperhold_address(eid, offset));
	zram_set_flag(zram, index, ZRAM_GWB);
	wbgrp_obj_insert(zram->zgrp, index, eid);
	wbgrp_obj_stats_inc(zram->zgrp, gid, eid, size);
	zgrp_obj_stats_dec(zram->zgrp, gid, size);
	pr_debug("move obj %u of group %u to hpio %p of eid %u, size = %u, offset = %u\n",
			index, gid, hpio, eid, size, offset);
unlock:
	zram_slot_unlock(zram, index);

	return size;
}

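/*
 * Move the object @index back from @hpio into a freshly allocated
 * zsmalloc handle. Allocation is first tried with GFP_NOWAIT under the
 * slot lock; on failure it retries with a sleeping __GFP_NOFAIL
 * allocation after dropping the lock.
 */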
static void move_obj_from_hpio(struct zram *zram, int index, struct hpio *hpio)
{
	u32 size = 0;
	unsigned long handle = 0;
	u32 eid, offset;
	u64 addr;
	char *dst = NULL;
	u16 gid;

	eid = hyperhold_io_extent(hpio);
retry:
	zram_slot_lock(zram, index);
	if (!zram_test_flag(zram, index, ZRAM_GWB))
		goto unlock;
	addr = zram_get_handle(zram, index);
	if (hyperhold_addr_extent(addr) != eid)
		goto unlock;
	size = zram_get_obj_size(zram, index);
	if (handle)
		goto move;
	handle = zs_malloc(zram->mem_pool, size, GFP_NOWAIT);
	if (handle)
		goto move;
	zram_slot_unlock(zram, index);
	handle = zs_malloc(zram->mem_pool, size, GFP_NOIO | __GFP_NOFAIL);
	if (handle)
		goto retry;
	BUG();

	return;
move:
	offset = hyperhold_addr_offset(addr);
	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
	copy_obj(hpio, offset, dst, size, false);
	zs_unmap_object(zram->mem_pool, handle);
	zram_set_handle(zram, index, handle);
	zram_clear_flag(zram, index, ZRAM_GWB);
	gid = zram_get_memcg_id(zram, index);
	zgrp_obj_insert(zram->zgrp, index, gid);
	wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size);
	zgrp_obj_stats_inc(zram->zgrp, gid, size);
	pr_debug("move obj %u of group %u from hpio %p of eid %u, size = %u, offset = %u\n",
			index, gid, hpio, eid, size, offset);
unlock:
	zram_slot_unlock(zram, index);
}

#define NR_ISOLATE 32
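/*
 * Move all objects of the extent under I/O back into zram, in batches
 * of NR_ISOLATE. Returns true if the extent ends up empty and can be
 * freed.
 */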
static bool move_extent_from_hpio(struct zram *zram, struct hpio *hpio)
{
	u32 idxs[NR_ISOLATE];
	u32 eid;
	u32 nr;
	int i;
	bool last = false;

	eid = hyperhold_io_extent(hpio);
repeat:
	nr = wbgrp_isolate_objs(zram->zgrp, eid, idxs, NR_ISOLATE, &last);
	for (i = 0; i < nr; i++)
		move_obj_from_hpio(zram, idxs[i], hpio);
	if (last)
		return true;
	if (nr)
		goto repeat;

	return false;
}

struct hpio_priv {
	struct zram *zram;
	u16 gid;
};

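/*
 * Write-side I/O completion: on success the data now lives in the
 * extent; on failure, move the objects back into zram and delete the
 * extent.
 */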
static void write_endio(struct hpio *hpio)
{
	struct hpio_priv *priv = hyperhold_io_private(hpio);
	struct zram *zram = priv->zram;
	u16 gid = priv->gid;
	u32 eid = hyperhold_io_extent(hpio);

	if (hyperhold_io_success(hpio))
		goto out;
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_complete(hpio);
	hyperhold_io_put(hpio);
	kfree(priv);
}

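/*
 * Fill @hpio with isolated objects of group @gid until the extent is
 * full or no more objects can be isolated. Returns the number of bytes
 * collected.
 */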
static u32 collect_objs(struct zram *zram, u16 gid, struct hpio *hpio, u32 ext_size)
{
	u32 offset = 0;
	u32 last_offset;
	u32 nr;
	u32 idxs[NR_ISOLATE];
	int i;

more:
	last_offset = offset;
	nr = zgrp_isolate_objs(zram->zgrp, gid, idxs, NR_ISOLATE, NULL);
	for (i = 0; i < nr; i++)
		offset += move_obj_to_hpio(zram, idxs[i], gid, hpio, offset);
	pr_debug("%u bytes attached, offset = %u.\n", offset - last_offset, offset);
	if (offset < ext_size && offset != last_offset)
		goto more;

	return offset;
}

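/*
 * Allocate an extent, collect objects of group @gid into it and submit
 * the write asynchronously. Returns the number of bytes submitted, or
 * 0 on failure.
 */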
static u64 write_one_extent(struct zram *zram, u16 gid)
{
	int eid;
	struct hpio *hpio = NULL;
	struct hpio_priv *priv = NULL;
	u32 size = 0;
	int ret;

	priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO);
	if (!priv)
		return 0;
	priv->gid = gid;
	priv->zram = zram;
	eid = hyperhold_alloc_extent();
	if (eid < 0)
		goto err;
	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_WRITE);
	if (!hpio)
		goto free_extent;

	size = collect_objs(zram, gid, hpio, hyperhold_extent_size(eid));
	if (size == 0) {
		pr_err("group %u has no data in zram.\n", gid);
		goto put_hpio;
	}
	zgrp_ext_insert(zram->zgrp, eid, gid);

	ret = hyperhold_write_async(hpio, write_endio, priv);
	if (ret)
		goto move_back;

	return size;
move_back:
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
	eid = -EINVAL;
put_hpio:
	hyperhold_io_put(hpio);
free_extent:
	if (eid >= 0)
		hyperhold_free_extent(eid);
err:
	kfree(priv);

	return 0;
}

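/*
 * Read-side I/O completion: move the extent's objects back into zram
 * and delete the extent once it is empty. A failed read is fatal.
 */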
static void read_endio(struct hpio *hpio)
{
	struct hpio_priv *priv = hyperhold_io_private(hpio);
	struct zram *zram = priv->zram;
	u16 gid = priv->gid;
	u32 eid = hyperhold_io_extent(hpio);

	if (!hyperhold_io_success(hpio)) {
		BUG();
		goto out;
	}
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_complete(hpio);
	hyperhold_io_put(hpio);
	kfree(priv);
}

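/*
 * Submit an asynchronous read of extent @eid for group @gid. Returns
 * the extent size on successful submission, or 0 on failure.
 */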
static u64 read_one_extent(struct zram *zram, u32 eid, u16 gid)
{
	struct hpio *hpio = NULL;
	u32 ext_size = 0;
	int ret;
	struct hpio_priv *priv = NULL;

	priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO);
	if (!priv)
		goto err;
	priv->gid = gid;
	priv->zram = zram;
	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
	if (!hpio)
		goto err;
	ext_size = hyperhold_extent_size(eid);
	ret = hyperhold_read_async(hpio, read_endio, priv);
	if (ret)
		goto err;

	return ext_size;
err:
	hyperhold_io_put(hpio);
	kfree(priv);

	return 0;
}

static void sync_read_endio(struct hpio *hpio)
{
	hyperhold_io_complete(hpio);
}

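/*
 * Synchronously read back the object @index on a page fault. Expects
 * the slot lock to be held on entry and re-acquires it before
 * returning; the ZRAM_FAULT flag and fault_wq serialize concurrent
 * faults on the same object.
 */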
static int read_one_obj_sync(struct zram *zram, u32 index)
{
	struct hpio *hpio = NULL;
	int ret;
	u32 eid;
	u16 gid;
	u32 size;

	if (!zram_test_flag(zram, index, ZRAM_GWB))
		return 0;

	pr_debug("read obj %u.\n", index);

	gid = zram_get_memcg_id(zram, index);
	eid = hyperhold_addr_extent(zram_get_handle(zram, index));
	size = zram_get_obj_size(zram, index);
	wbgrp_fault_stats_inc(zram->zgrp, gid, eid, size);
check:
	if (!zram_test_flag(zram, index, ZRAM_GWB))
		return 0;
	if (!zram_test_flag(zram, index, ZRAM_FAULT))
		goto read;
	zram_slot_unlock(zram, index);
	wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT));
	zram_slot_lock(zram, index);
	goto check;
read:
	zram_set_flag(zram, index, ZRAM_FAULT);
	zram_slot_unlock(zram, index);

	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
	if (!hpio) {
		ret = -ENOMEM;
		goto out;
	}
	ret = hyperhold_read_async(hpio, sync_read_endio, NULL);
	/* io submit error */
	if (ret && ret != -EAGAIN)
		goto out;

	hyperhold_io_wait(hpio);
	/* got a write io, the data is ready, copy the pages even if the write failed */
	if (op_is_write(hyperhold_io_operate(hpio)))
		goto move;
	/* read io failed, return -EIO */
	if (!hyperhold_io_success(hpio)) {
		ret = -EIO;
		goto out;
	}
	/* success, copy the data and free the extent */
move:
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_put(hpio);
	zram_slot_lock(zram, index);
	zram_clear_flag(zram, index, ZRAM_FAULT);
	wake_up(&zram->zgrp->wbgrp.fault_wq);

	return ret;
}

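/*
 * Read extents of group @gid back from the backing device until at
 * least @req_size bytes have been submitted, or until the group has no
 * more extents if @req_size is 0. Returns the number of bytes
 * submitted for read.
 */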
u64 read_group_objs(struct zram *zram, u16 gid, u64 req_size)
{
	u32 eid;
	u64 read_size = 0;
	u32 nr;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1))
		return 0;

	pr_debug("read %llu bytes of group %u.\n", req_size, gid);

	while (!req_size || req_size > read_size) {
		nr = zgrp_isolate_exts(zram->zgrp, gid, &eid, 1, NULL);
		if (!nr)
			break;
		read_size += read_one_extent(zram, eid, gid);
	}

	return read_size;
}

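/*
 * Write objects of group @gid to the backing device until at least
 * @req_size bytes have been submitted, or until the group has no more
 * data if @req_size is 0. Returns the number of bytes written.
 */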
u64 write_group_objs(struct zram *zram, u16 gid, u64 req_size)
{
	u64 write_size = 0;
	u64 size = 0;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK(zram->zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1))
		return 0;

	pr_debug("write %llu bytes of group %u.\n", req_size, gid);

	while (!req_size || req_size > write_size) {
		size = write_one_extent(zram, gid);
		if (!size)
			break;
		write_size += size;
	}

	atomic64_add(write_size, &zram->zgrp->stats[0].write_size);
	atomic64_add(write_size, &zram->zgrp->stats[gid].write_size);

	return write_size;
}
#endif

#ifdef CONFIG_ZRAM_GROUP_DEBUG
#include <linux/random.h>
#define ZGRP_TEST_MAX_GRP 101
#endif

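/*
 * Page fault entry for a grouped object: account the fault and, with
 * writeback enabled, read the object back synchronously if needed.
 */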
int zram_group_fault_obj(struct zram *zram, u32 index)
{
	u16 gid;
	u32 size;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return 0;

	gid = zram_get_memcg_id(zram, index);
	size = zram_get_obj_size(zram, index);
	zgrp_fault_stats_inc(zram->zgrp, gid, size);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	return read_one_obj_sync(zram, index);
#else
	return 0;
#endif
}

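/*
 * Attach the object @index to the group of @memcg and account its
 * size. The object must not belong to any group yet.
 */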
void zram_group_track_obj(struct zram *zram, u32 index, struct mem_cgroup *memcg)
{
	u16 gid;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return;
	if (!CHECK(memcg && memcg->id.id, "obj %u has no memcg!\n", index))
		return;
	gid = zram_get_memcg_id(zram, index);
	if (!CHECK(!gid, "obj %u already has gid %u.\n", index, gid))
		BUG();

	gid = memcg->id.id;
	zram_set_memcg_id(zram, index, gid);
	zgrp_obj_insert(zram->zgrp, index, gid);
	zgrp_obj_stats_inc(zram->zgrp, gid, zram_get_obj_size(zram, index));
}

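/*
 * Detach the object @index from its group and drop its stats, waiting
 * for any page fault in flight on the object to finish first.
 */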
void zram_group_untrack_obj(struct zram *zram, u32 index)
{
	u16 gid;
	u32 size;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return;

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
check:
	if (!zram_test_flag(zram, index, ZRAM_FAULT))
		goto clear;
	zram_slot_unlock(zram, index);
	wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT));
	zram_slot_lock(zram, index);
	goto check;
clear:
#endif
	gid = zram_get_memcg_id(zram, index);
	size = zram_get_obj_size(zram, index);
	if (!gid)
		return;
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (zram_test_flag(zram, index, ZRAM_GWB)) {
		u32 eid = hyperhold_addr_extent(zram_get_handle(zram, index));

		if (wbgrp_obj_delete(zram->zgrp, index, eid)) {
			zgrp_ext_delete(zram->zgrp, eid, gid);
			hyperhold_should_free_extent(eid);
		}
		zram_clear_flag(zram, index, ZRAM_GWB);
		zram_set_memcg_id(zram, index, 0);
		wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size);
		zram_set_handle(zram, index, 0);
		return;
	}
#endif
	zgrp_obj_delete(zram->zgrp, index, gid);
	zram_set_memcg_id(zram, index, 0);
	zgrp_obj_stats_dec(zram->zgrp, gid, size);
}

#ifdef CONFIG_ZRAM_GROUP_DEBUG
void group_debug(struct zram *zram, u32 op, u32 index, u32 gid)
{
	if (op == 0)
		zram_group_dump(zram->zgrp, gid, index);

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (op == 22)
		read_group_objs(zram, gid, index);
	if (op == 23)
		write_group_objs(zram, gid, index);
	if (op == 20) {
		if (index)
			zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent());
		else
			zram_group_remove_writeback(zram->zgrp);
	}
#endif
}
#endif

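/*
 * Return one statistic of group @gid, selected by @type: zram-side
 * counters always, swap-side counters only with writeback enabled.
 */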
static u64 group_obj_stats(struct zram *zram, u16 gid, int type)
{
	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(gid, 0, zram->zgrp->nr_grp - 1))
		return 0;

	if (type == CACHE_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].zram_size);
	else if (type == CACHE_PAGE)
		return atomic_read(&zram->zgrp->stats[gid].zram_pages);
	else if (type == CACHE_FAULT)
		return atomic64_read(&zram->zgrp->stats[gid].zram_fault);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	else if (type == SWAP_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].wb_size);
	else if (type == SWAP_PAGE)
		return atomic_read(&zram->zgrp->stats[gid].wb_pages);
	else if (type == READ_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].read_size);
	else if (type == WRITE_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].write_size);
	else if (type == SWAP_FAULT)
		return atomic64_read(&zram->zgrp->stats[gid].wb_fault);
	BUG();
#endif

	return 0;
}

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
static u64 zram_group_read(u16 gid, u64 req_size, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return read_group_objs((struct zram *)priv, gid, req_size);
}

static u64 zram_group_write(u16 gid, u64 req_size, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return write_group_objs((struct zram *)priv, gid, req_size);
}
#else
static u64 zram_group_read(u16 gid, u64 req_size, void *priv)
{
	return 0;
}

static u64 zram_group_write(u16 gid, u64 req_size, void *priv)
{
	return 0;
}
#endif

static u64 zram_group_data_size(u16 gid, int type, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return group_obj_stats((struct zram *)priv, gid, type);
}

struct group_swap_ops zram_group_ops = {
	.group_read = zram_group_read,
	.group_write = zram_group_write,
	.group_data_size = zram_group_data_size,
};

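/*
 * Register this zram device's group ops as a group swap device with
 * zswapd, so the shrinker can drive group reads and writebacks.
 */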
static int register_zram_group(struct zram *zram)
{
	if (!CHECK(zram, "zram is NULL!\n"))
		return -EINVAL;
	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return -EINVAL;
	}

	zram->zgrp->gsdev = register_group_swap(&zram_group_ops, zram);
	if (!zram->zgrp->gsdev) {
		pr_err("register zram group failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void unregister_zram_group(struct zram *zram)
{
	if (!CHECK(zram, "zram is NULL!\n"))
		return;
	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}

	unregister_group_swap(zram->zgrp->gsdev);
	zram->zgrp->gsdev = NULL;
}

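/*
 * Allocate the group metadata according to zgrp_ctrl, optionally apply
 * writeback, and register the device as a group swap device.
 */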
void zram_group_init(struct zram *zram, u32 nr_obj)
{
	unsigned int ctrl = zram->zgrp_ctrl;

	if (ctrl == ZGRP_NONE)
		return;
	zram->zgrp = zram_group_meta_alloc(nr_obj, ZGRP_MAX_GRP - 1);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (ctrl == ZGRP_WRITE)
		zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent());
#endif
	register_zram_group(zram);
}

void zram_group_deinit(struct zram *zram)
{
	unregister_zram_group(zram);
	zram_group_meta_free(zram->zgrp);
	zram->zgrp = NULL;
}