// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/block/zram/zram_group/group_writeback.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/blk_types.h>
#include <linux/zswapd.h>

#include "../zram_drv.h"
#include "zram_group.h"

#ifdef CONFIG_HYPERHOLD
#include "hyperhold.h"
#endif

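/* Evaluate to true when @cond holds, otherwise log an error and yield false. */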
#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
			"%s %u out of bounds %u ~ %u!\n", \
			#var, (var), (min), (max))

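/* The owning memcg id of a slot lives in the ZRAM_GRPID_MASK bits of its flags. */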
static u16 zram_get_memcg_id(struct zram *zram, u32 index)
{
	return (zram->table[index].flags & ZRAM_GRPID_MASK) >> ZRAM_SIZE_SHIFT;
}

static void zram_set_memcg_id(struct zram *zram, u32 index, u16 gid)
{
	unsigned long old = zram->table[index].flags & (~ZRAM_GRPID_MASK);

	zram->table[index].flags = old | ((u64)gid << ZRAM_SIZE_SHIFT);
}

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
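/*
 * Check whether the isolated object @index can be written back for group @gid.
 * Same-element, already written back and fault-pinned objects are re-inserted
 * into the group as hottest and skipped.
 */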
static bool obj_can_wb(struct zram *zram, u32 index, u16 gid)
{
	/* overwritten obj, just skip */
	if (zram_get_memcg_id(zram, index) != gid) {
		pr_info("obj %u is from group %u instead of group %u.\n",
				index, zram_get_memcg_id(zram, index), gid);
		return false;
	}
	if (!zgrp_obj_is_isolated(zram->zgrp, index)) {
		pr_info("obj %u is not isolated.\n", index);
		return false;
	}
	/* no need to write back, put the obj back as HOTTEST */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		pr_info("obj %u is filled with same element.\n", index);
		goto insert;
	}
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		pr_info("obj %u is already written back.\n", index);
		goto insert;
	}
	/* obj is needed by a pagefault req, do not write it back. */
	if (zram_test_flag(zram, index, ZRAM_FAULT)) {
		pr_info("obj %u is needed by a pagefault request.\n", index);
		goto insert;
	}
	/* should never happen */
	if (zram_test_flag(zram, index, ZRAM_GWB)) {
		pr_info("obj %u is already group written back.\n", index);
		BUG();
		return false;
	}

	return true;
insert:
	zgrp_obj_insert(zram->zgrp, index, gid);

	return false;
}

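/*
 * Copy @size bytes between @obj and the hpio buffer at @offset, splitting the
 * copy across two pages when the object straddles a page boundary.  @to selects
 * the direction: true copies into the hpio, false copies out of it.
 */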
static void copy_obj(struct hpio *hpio, u32 offset, char *obj, u32 size, bool to)
{
	u32 page_id, start;
	char *buf = NULL;

	page_id = offset / PAGE_SIZE;
	start = offset % PAGE_SIZE;
	if (size + start <= PAGE_SIZE) {
		buf = page_to_virt(hyperhold_io_page(hpio, page_id));
		if (to)
			memcpy(buf + start, obj, size);
		else
			memcpy(obj, buf + start, size);

		return;
	}
	buf = page_to_virt(hyperhold_io_page(hpio, page_id));
	if (to)
		memcpy(buf + start, obj, PAGE_SIZE - start);
	else
		memcpy(obj, buf + start, PAGE_SIZE - start);
	buf = page_to_virt(hyperhold_io_page(hpio, page_id + 1));
	if (to)
		memcpy(buf, obj + PAGE_SIZE - start, size + start - PAGE_SIZE);
	else
		memcpy(obj + PAGE_SIZE - start, buf, size + start - PAGE_SIZE);
}

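/*
 * Move one isolated object into the write hpio at @offset.  On success the
 * zsmalloc handle is freed, the slot is switched to a hyperhold address with
 * ZRAM_GWB set, and the consumed size is returned; 0 means the object was
 * skipped or the extent has no room left.
 */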
static u32 move_obj_to_hpio(struct zram *zram, u32 index, u16 gid,
				struct hpio *hpio, u32 offset)
{
	u32 size = 0;
	unsigned long handle;
	char *src = NULL;
	u32 ext_size;
	u32 eid;

	eid = hyperhold_io_extent(hpio);
	ext_size = hyperhold_extent_size(eid);

	zram_slot_lock(zram, index);
	if (!obj_can_wb(zram, index, gid))
		goto unlock;
	size = zram_get_obj_size(zram, index);
	/* no space left, put the obj back as COLDEST */
	if (size + offset > ext_size) {
		pr_info("obj %u size is %u, but ext %u has only %u bytes left.\n",
				index, size, eid, ext_size - offset);
		zgrp_obj_putback(zram->zgrp, index, gid);
		size = 0;
		goto unlock;
	}
	handle = zram_get_handle(zram, index);
	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	copy_obj(hpio, offset, src, size, true);
	zs_unmap_object(zram->mem_pool, handle);
	zs_free(zram->mem_pool, handle);
	zram_set_handle(zram, index, hyperhold_address(eid, offset));
	zram_set_flag(zram, index, ZRAM_GWB);
	wbgrp_obj_insert(zram->zgrp, index, eid);
	wbgrp_obj_stats_inc(zram->zgrp, gid, eid, size);
	zgrp_obj_stats_dec(zram->zgrp, gid, size);
	pr_info("move obj %u of group %u to hpio %p of eid %u, size = %u, offset = %u\n",
		index, gid, hpio, eid, size, offset);
unlock:
	zram_slot_unlock(zram, index);

	return size;
}

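/*
 * Move one object back from the hpio buffer into zsmalloc.  Allocation is
 * first tried with GFP_NOWAIT under the slot lock and retried with
 * __GFP_NOFAIL after dropping it, so the slot state is re-checked on retry.
 */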
static void move_obj_from_hpio(struct zram *zram, int index, struct hpio *hpio)
{
	u32 size = 0;
	unsigned long handle = 0;
	u32 eid, offset;
	u64 addr;
	char *dst = NULL;
	u16 gid;

	eid = hyperhold_io_extent(hpio);
retry:
	zram_slot_lock(zram, index);
	if (!zram_test_flag(zram, index, ZRAM_GWB))
		goto unlock;
	addr = zram_get_handle(zram, index);
	if (hyperhold_addr_extent(addr) != eid)
		goto unlock;
	size = zram_get_obj_size(zram, index);
	if (handle)
		goto move;
	handle = zs_malloc(zram->mem_pool, size, GFP_NOWAIT);
	if (handle)
		goto move;
	zram_slot_unlock(zram, index);
	handle = zs_malloc(zram->mem_pool, size, GFP_NOIO | __GFP_NOFAIL);
	if (handle)
		goto retry;
	BUG();

	return;
move:
	offset = hyperhold_addr_offset(addr);
	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
	copy_obj(hpio, offset, dst, size, false);
	zs_unmap_object(zram->mem_pool, handle);
	zram_set_handle(zram, index, handle);
	zram_clear_flag(zram, index, ZRAM_GWB);
	gid = zram_get_memcg_id(zram, index);
	zgrp_obj_insert(zram->zgrp, index, gid);
	wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size);
	zgrp_obj_stats_inc(zram->zgrp, gid, size);
	pr_info("move obj %u of group %u from hpio %p of eid %u, size = %u, offset = %u\n",
		index, gid, hpio, eid, size, offset);
unlock:
	zram_slot_unlock(zram, index);
}

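/*
 * Drain a whole extent back into zram, isolating at most NR_ISOLATE objects
 * per pass.  Returns true once the extent's writeback list is empty.
 */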
#define NR_ISOLATE 32
static bool move_extent_from_hpio(struct zram *zram, struct hpio *hpio)
{
	u32 idxs[NR_ISOLATE];
	u32 eid;
	u32 nr;
	int i;
	bool last = false;

	eid = hyperhold_io_extent(hpio);
repeat:
	nr = wbgrp_isolate_objs(zram->zgrp, eid, idxs, NR_ISOLATE, &last);
	for (i = 0; i < nr; i++)
		move_obj_from_hpio(zram, idxs[i], hpio);
	if (last)
		return true;
	if (nr)
		goto repeat;

	return false;
}

struct hpio_priv {
	struct zram *zram;
	u16 gid;
};

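/*
 * Completion handler for an extent write.  On failure the objects are moved
 * back into zram and the extent is released if it becomes empty.
 */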
static void write_endio(struct hpio *hpio)
{
	struct hpio_priv *priv = hyperhold_io_private(hpio);
	struct zram *zram = priv->zram;
	u16 gid = priv->gid;
	u32 eid = hyperhold_io_extent(hpio);

	if (hyperhold_io_success(hpio))
		goto out;
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_complete(hpio);
	hyperhold_io_put(hpio);
	kfree(priv);
}

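/*
 * Fill the write hpio with isolated objects of group @gid until the extent is
 * full or the group has nothing left to isolate.  Returns the bytes collected.
 */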
static u32 collect_objs(struct zram *zram, u16 gid, struct hpio *hpio, u32 ext_size)
{
	u32 offset = 0;
	u32 last_offset;
	u32 nr;
	u32 idxs[NR_ISOLATE];
	int i;

more:
	last_offset = offset;
	nr = zgrp_isolate_objs(zram->zgrp, gid, idxs, NR_ISOLATE, NULL);
	for (i = 0; i < nr; i++)
		offset += move_obj_to_hpio(zram, idxs[i], gid, hpio, offset);
	pr_info("%u bytes attached, offset = %u.\n", offset - last_offset, offset);
	if (offset < ext_size && offset != last_offset)
		goto more;

	return offset;
}

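/*
 * Write one extent worth of group @gid's objects to the hyperhold device.
 * Returns the number of bytes submitted, or 0 if nothing could be written.
 */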
static u64 write_one_extent(struct zram *zram, u16 gid)
{
	int eid;
	struct hpio *hpio = NULL;
	struct hpio_priv *priv = NULL;
	u32 size = 0;
	int ret;

	priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO);
	if (!priv)
		return 0;
	priv->gid = gid;
	priv->zram = zram;
	eid = hyperhold_alloc_extent();
	if (eid < 0)
		goto err;
	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_WRITE);
	if (!hpio)
		goto free_extent;

	size = collect_objs(zram, gid, hpio, hyperhold_extent_size(eid));
	if (size == 0) {
		pr_err("group %u has no data in zram.\n", gid);
		goto put_hpio;
	}
	zgrp_ext_insert(zram->zgrp, eid, gid);

	ret = hyperhold_write_async(hpio, write_endio, priv);
	if (ret)
		goto move_back;

	return size;
move_back:
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
	eid = -EINVAL;
put_hpio:
	hyperhold_io_put(hpio);
free_extent:
	if (eid >= 0)
		hyperhold_free_extent(eid);
err:
	kfree(priv);

	return 0;
}

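/*
 * Completion handler for an asynchronous group extent read: move the objects
 * back into zram and release the extent once it is empty.
 */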
static void read_endio(struct hpio *hpio)
{
	struct hpio_priv *priv = hyperhold_io_private(hpio);
	struct zram *zram = priv->zram;
	u16 gid = priv->gid;
	u32 eid = hyperhold_io_extent(hpio);

	if (!hyperhold_io_success(hpio)) {
		BUG();
		goto out;
	}
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_complete(hpio);
	hyperhold_io_put(hpio);
	kfree(priv);
}

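/*
 * Kick off an asynchronous read of extent @eid for group @gid.  Returns the
 * extent size on successful submission, 0 otherwise.
 */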
static u64 read_one_extent(struct zram *zram, u32 eid, u16 gid)
{
	struct hpio *hpio = NULL;
	u32 ext_size = 0;
	int ret;
	struct hpio_priv *priv = NULL;

	priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO);
	if (!priv)
		goto err;
	priv->gid = gid;
	priv->zram = zram;
	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
	if (!hpio)
		goto err;
	ext_size = hyperhold_extent_size(eid);
	ret = hyperhold_read_async(hpio, read_endio, priv);
	if (ret)
		goto err;

	return ext_size;
err:
	hyperhold_io_put(hpio);
	kfree(priv);

	return 0;
}

static void sync_read_endio(struct hpio *hpio)
{
	hyperhold_io_complete(hpio);
}

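/*
 * Synchronously fault in one group-written-back object.  Called with the slot
 * lock held; the slot is marked ZRAM_FAULT while the extent read is in flight
 * and waiters on fault_wq are woken once the data is back in zram.
 */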
static int read_one_obj_sync(struct zram *zram, u32 index)
{
	struct hpio *hpio = NULL;
	int ret;
	u32 eid;
	u16 gid;
	u32 size;

	if (!zram_test_flag(zram, index, ZRAM_GWB))
		return 0;

	pr_info("read obj %u.\n", index);

	gid = zram_get_memcg_id(zram, index);
	eid = hyperhold_addr_extent(zram_get_handle(zram, index));
	size = zram_get_obj_size(zram, index);
	wbgrp_fault_stats_inc(zram->zgrp, gid, eid, size);
check:
	if (!zram_test_flag(zram, index, ZRAM_GWB))
		return 0;
	if (!zram_test_flag(zram, index, ZRAM_FAULT))
		goto read;
	zram_slot_unlock(zram, index);
	wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT));
	zram_slot_lock(zram, index);
	goto check;
read:
	zram_set_flag(zram, index, ZRAM_FAULT);
	zram_slot_unlock(zram, index);

	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
	if (!hpio) {
		ret = -ENOMEM;
		goto out;
	}
	ret = hyperhold_read_async(hpio, sync_read_endio, NULL);
	/* io submit error */
	if (ret && ret != -EAGAIN)
		goto out;

	hyperhold_io_wait(hpio);
	/* got a write io: the data is ready, so copy the pages even if the write failed */
	if (op_is_write(hyperhold_io_operate(hpio)))
		goto move;
	/* read io failed, return -EIO */
	if (!hyperhold_io_success(hpio)) {
		ret = -EIO;
		goto out;
	}
	/* success, copy the data and free the extent */
move:
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_put(hpio);
	zram_slot_lock(zram, index);
	zram_clear_flag(zram, index, ZRAM_FAULT);
	wake_up(&zram->zgrp->wbgrp.fault_wq);

	return ret;
}

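/*
 * Read extents of group @gid back from the hyperhold device until at least
 * @req_size bytes have been submitted (0 means read everything).
 */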
u64 read_group_objs(struct zram *zram, u16 gid, u64 req_size)
{
	u32 eid;
	u64 read_size = 0;
	u32 nr;

	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1))
		return 0;

	pr_info("read %llu bytes of data of group %u.\n", req_size, gid);

	while (!req_size || req_size > read_size) {
		nr = zgrp_isolate_exts(zram->zgrp, gid, &eid, 1, NULL);
		if (!nr)
			break;
		read_size += read_one_extent(zram, eid, gid);
	}

	return read_size;
}

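/*
 * Write objects of group @gid out to the hyperhold device, one extent at a
 * time, until at least @req_size bytes have been queued (0 means write all).
 */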
u64 write_group_objs(struct zram *zram, u16 gid, u64 req_size)
{
	u64 write_size = 0;
	u64 size = 0;

	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return 0;
	if (!CHECK(zram->zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1))
		return 0;

	pr_info("write %llu bytes of data of group %u.\n", req_size, gid);

	while (!req_size || req_size > write_size) {
		size = write_one_extent(zram, gid);
		if (!size)
			break;
		write_size += size;
	}

	return write_size;
}
#endif

#ifdef CONFIG_ZRAM_GROUP_DEBUG
#include <linux/random.h>
#define ZGRP_TEST_MAX_GRP 101
#endif

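/*
 * Account a page fault on object @index to its group and, with group
 * writeback enabled, pull the object back from the hyperhold device.
 */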
int zram_group_fault_obj(struct zram *zram, u32 index)
{
	u16 gid;
	u32 size;

	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return 0;

	gid = zram_get_memcg_id(zram, index);
	size = zram_get_obj_size(zram, index);
	zgrp_fault_stats_inc(zram->zgrp, gid, size);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	return read_one_obj_sync(zram, index);
#else
	return 0;
#endif
}

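/*
 * Attach a freshly stored object to the group of @memcg and update the
 * group's zram statistics.
 */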
void zram_group_track_obj(struct zram *zram, u32 index, struct mem_cgroup *memcg)
{
	u16 gid;

	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return;
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return;
	if (!CHECK(memcg && memcg->id.id, "obj %u has no memcg!\n", index))
		return;
	gid = zram_get_memcg_id(zram, index);
	if (!CHECK(!gid, "obj %u has gid %u.\n", index, gid))
		BUG();

	gid = memcg->id.id;
	zram_set_memcg_id(zram, index, gid);
	zgrp_obj_insert(zram->zgrp, index, gid);
	zgrp_obj_stats_inc(zram->zgrp, gid, zram_get_obj_size(zram, index));
}

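/*
 * Detach object @index from its group before it is freed or overwritten,
 * waiting for any pending fault read and dropping its writeback state.
 */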
void zram_group_untrack_obj(struct zram *zram, u32 index)
{
	u16 gid;
	u32 size;

	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return;
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return;

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
check:
	if (!zram_test_flag(zram, index, ZRAM_FAULT))
		goto clear;
	zram_slot_unlock(zram, index);
	wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT));
	zram_slot_lock(zram, index);
	goto check;
clear:
#endif
	gid = zram_get_memcg_id(zram, index);
	size = zram_get_obj_size(zram, index);
	if (!gid)
		return;
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (zram_test_flag(zram, index, ZRAM_GWB)) {
		u32 eid = hyperhold_addr_extent(zram_get_handle(zram, index));

		if (wbgrp_obj_delete(zram->zgrp, index, eid)) {
			zgrp_ext_delete(zram->zgrp, eid, gid);
			hyperhold_should_free_extent(eid);
		}
		zram_clear_flag(zram, index, ZRAM_GWB);
		zram_set_memcg_id(zram, index, 0);
		wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size);
		zram_set_handle(zram, index, 0);
		return;
	}
#endif
	zgrp_obj_delete(zram->zgrp, index, gid);
	zram_set_memcg_id(zram, index, 0);
	zgrp_obj_stats_dec(zram->zgrp, gid, size);
}

#ifdef CONFIG_ZRAM_GROUP_DEBUG
void group_debug(struct zram *zram, u32 op, u32 index, u32 gid)
{
	if (op == 0)
		zram_group_dump(zram->zgrp, gid, index);

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (op == 22)
		read_group_objs(zram, gid, index);
	if (op == 23)
		write_group_objs(zram, gid, index);
	if (op == 20) {
		if (index)
			zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent());
		else
			zram_group_remove_writeback(zram->zgrp);
	}
#endif
}
#endif

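/*
 * Return one per-group counter selected by @type; the writeback counters are
 * only available when CONFIG_ZRAM_GROUP_WRITEBACK is set.
 */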
static u64 group_obj_stats(struct zram *zram, u16 gid, int type)
{
	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 0, zram->zgrp->nr_grp - 1))
		return 0;

	if (type == CACHE_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].zram_size);
	else if (type == CACHE_PAGE)
		return atomic_read(&zram->zgrp->stats[gid].zram_pages);
	else if (type == CACHE_FAULT)
		return atomic64_read(&zram->zgrp->stats[gid].zram_fault);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	else if (type == SWAP_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].wb_size);
	else if (type == SWAP_PAGE)
		return atomic_read(&zram->zgrp->stats[gid].wb_pages);
	else if (type == READ_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].read_size);
	else if (type == WRITE_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].write_size);
	else if (type == SWAP_FAULT)
		return atomic64_read(&zram->zgrp->stats[gid].wb_fault);
	BUG();
#endif

	return 0;
}

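/* Callbacks wired into the group_swap interface (see register_group_swap()). */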
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
static u64 zram_group_read(u16 gid, u64 req_size, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return read_group_objs((struct zram *)priv, gid, req_size);
}

static u64 zram_group_write(u16 gid, u64 req_size, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return write_group_objs((struct zram *)priv, gid, req_size);
}
#else
static u64 zram_group_read(u16 gid, u64 req_size, void *priv)
{
	return 0;
}
static u64 zram_group_write(u16 gid, u64 req_size, void *priv)
{
	return 0;
}
#endif

static u64 zram_group_data_size(u16 gid, int type, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return group_obj_stats((struct zram *)priv, gid, type);
}

struct group_swap_ops zram_group_ops = {
	.group_read = zram_group_read,
	.group_write = zram_group_write,
	.group_data_size = zram_group_data_size,
};

static int register_zram_group(struct zram *zram)
{
	if (!CHECK(zram, "zram is NULL!\n"))
		return -EINVAL;
	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return -EINVAL;

	zram->zgrp->gsdev = register_group_swap(&zram_group_ops, zram);
	if (!zram->zgrp->gsdev) {
		pr_err("register zram group failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void unregister_zram_group(struct zram *zram)
{
	if (!CHECK(zram, "zram is NULL!\n"))
		return;
	if (!CHECK(zram->zgrp, "zram group is not enabled!\n"))
		return;

	unregister_group_swap(zram->zgrp->gsdev);
	zram->zgrp->gsdev = NULL;
}

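/*
 * Set up group tracking for a zram device with @nr_obj slots according to
 * zgrp_ctrl, optionally enabling writeback, and register it with group swap.
 */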
void zram_group_init(struct zram *zram, u32 nr_obj)
{
	unsigned int ctrl = zram->zgrp_ctrl;

	if (ctrl == ZGRP_NONE)
		return;
	zram->zgrp = zram_group_meta_alloc(nr_obj, ZGRP_MAX_GRP - 1);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (ctrl == ZGRP_WRITE)
		zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent());
#endif
	register_zram_group(zram);
}

void zram_group_deinit(struct zram *zram)
{
	unregister_zram_group(zram);
	zram_group_meta_free(zram->zgrp);
	zram->zgrp = NULL;
}