// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/hyperhold/hp_core.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#define pr_fmt(fmt) "[HYPERHOLD]" fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>

#include "hyperhold.h"
#include "hp_device.h"
#include "hp_space.h"
#include "hp_iotab.h"

#define HP_DFLT_DEVICE "/dev/by-name/hyperhold"
#define HP_DFLT_EXT_SIZE (1 << 15)
#define HP_DEV_NAME_LEN 256
#define HP_STATE_LEN 10

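/*
 * The CHECK* macros below are validation expressions: they evaluate to true
 * when the condition holds, and otherwise log the given error and evaluate
 * to false, so callers can bail out in a single line, e.g.
 * "if (!CHECK_BOUND(index, 0, hpio->nr_page - 1)) return NULL;".
 */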
#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
		"%s %u out of bounds %u ~ %u!\n", #var, (var), (min), (max))
#define CHECK_INITED CHECK(hyperhold.inited, "hyperhold is not enabled!\n")
#define CHECK_ENABLE (CHECK_INITED && CHECK(hyperhold.enable, "hyperhold is readonly!\n"))

struct hyperhold {
	bool enable;
	bool inited;

	char device_name[HP_DEV_NAME_LEN];
	u32 extent_size;
	u32 enable_soft_crypt;

	struct hp_device dev;
	struct hp_space spc;

	struct workqueue_struct *read_wq;
	struct workqueue_struct *write_wq;

	struct mutex init_lock;
};

struct hyperhold hyperhold;

atomic64_t mem_used = ATOMIC64_INIT(0);
#ifdef CONFIG_HYPERHOLD_DEBUG
/*
 * return the memory overhead of hyperhold module
 */
u64 hyperhold_memory_used(void)
{
	return atomic64_read(&mem_used) + hpio_memory() + space_memory();
}
#endif

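/*
 * Disable hyperhold. Writing is refused from here on. If
 * wait_for_space_empty() succeeds, the workqueues, space, crypto and
 * block device binding are torn down as well; otherwise the module is
 * left in a read-only state.
 */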
void hyperhold_disable(bool force)
{
	if (!CHECK_INITED)
		return;
	if (!force && !CHECK_ENABLE)
		return;

	mutex_lock(&hyperhold.init_lock);
	hyperhold.enable = false;
	if (!wait_for_space_empty(&hyperhold.spc, force))
		goto out;
	hyperhold.inited = false;
	wait_for_iotab_empty();
	destroy_workqueue(hyperhold.read_wq);
	destroy_workqueue(hyperhold.write_wq);
	deinit_space(&hyperhold.spc);
	crypto_deinit(&hyperhold.dev);
	unbind_bdev(&hyperhold.dev);
out:
	if (hyperhold.inited)
		pr_info("hyperhold is disabled, read only.\n");
	else
		pr_info("hyperhold is totally disabled!\n");
	mutex_unlock(&hyperhold.init_lock);
}
EXPORT_SYMBOL(hyperhold_disable);

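/*
 * Enable hyperhold: bind the backing block device, set up crypto and the
 * extent space, and create the read/write workqueues. Initialization runs
 * at most once; later calls on an initialized module only flip the enable
 * flag back on.
 */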
void hyperhold_enable(void)
{
	bool enable = true;

	if (hyperhold.inited)
		goto out;

	mutex_lock(&hyperhold.init_lock);
	if (hyperhold.inited)
		goto unlock;
	if (!bind_bdev(&hyperhold.dev, hyperhold.device_name))
		goto err1;
	if (!crypto_init(&hyperhold.dev, hyperhold.enable_soft_crypt))
		goto err2;
	if (!init_space(&hyperhold.spc, hyperhold.dev.dev_size, hyperhold.extent_size))
		goto err3;
	hyperhold.read_wq = alloc_workqueue("hyperhold_read", WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!hyperhold.read_wq)
		goto err4;
	hyperhold.write_wq = alloc_workqueue("hyperhold_write", 0, 0);
	if (!hyperhold.write_wq)
		goto err5;
	hyperhold.inited = true;
	goto unlock;
err5:
	destroy_workqueue(hyperhold.read_wq);
err4:
	deinit_space(&hyperhold.spc);
err3:
	crypto_deinit(&hyperhold.dev);
err2:
	unbind_bdev(&hyperhold.dev);
err1:
	enable = false;
unlock:
	mutex_unlock(&hyperhold.init_lock);
out:
	if (enable) {
		hyperhold.enable = true;
		pr_info("hyperhold is enabled.\n");
	} else {
		hyperhold.enable = false;
		pr_err("hyperhold enable failed!\n");
	}
}
EXPORT_SYMBOL(hyperhold_enable);

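/*
 * "enable" is world-writable (mode 0666), so this handler enforces its own
 * permission check: only root and the memory manager service may switch
 * the state. Reading it reports "enable", "readonly" or "disable".
 */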
static int enable_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	const struct cred *cred = current_cred();
	char *filter_buf;

	if (write) {
		if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) &&
		    !uid_eq(cred->euid, GLOBAL_ROOT_UID)) {
			pr_err("no permission to enable/disable eswap!\n");
			return 0;
		}
		filter_buf = strstrip((char *)buffer);
		if (!strcmp(filter_buf, "enable"))
			hyperhold_enable();
		else if (!strcmp(filter_buf, "disable"))
			hyperhold_disable(false);
		else if (!strcmp(filter_buf, "force_disable"))
			hyperhold_disable(true);
	} else {
		if (*lenp < HP_STATE_LEN || *ppos) {
			*lenp = 0;
			return 0;
		}
		if (hyperhold.enable)
			strcpy(buffer, "enable\n");
		else if (hyperhold.inited)
			strcpy(buffer, "readonly\n");
		else
			strcpy(buffer, "disable\n");
		*lenp = strlen(buffer);
		*ppos += *lenp;
#ifdef CONFIG_HYPERHOLD_DEBUG
		pr_info("hyperhold memory overhead = %llu.\n", hyperhold_memory_used());
#endif
	}
	return 0;
}

static int device_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_dostring(table, write, buffer, lenp, ppos);
	if (write && !ret) {
		hyperhold.enable_soft_crypt = 1;
		pr_info("device changed, soft crypt enabled by default.\n");
	}
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static int extent_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_douintvec(table, write, buffer, lenp, ppos);
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static int crypto_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

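/*
 * The nested tables below expose the knobs under
 * /proc/sys/kernel/hyperhold/, e.g.
 *
 *	echo enable > /proc/sys/kernel/hyperhold/enable
 *	cat /proc/sys/kernel/hyperhold/enable
 */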
static struct ctl_table_header *hp_sysctl_header;
static struct ctl_table hp_table[] = {
	{
		.procname = "enable",
		.mode = 0666,
		.proc_handler = enable_sysctl_handler,
	},
	{
		.procname = "device",
		.data = &hyperhold.device_name,
		.maxlen = sizeof(hyperhold.device_name),
		.mode = 0644,
		.proc_handler = device_sysctl_handler,
	},
	{
		.procname = "extent_size",
		.data = &hyperhold.extent_size,
		.maxlen = sizeof(hyperhold.extent_size),
		.mode = 0644,
		.proc_handler = extent_sysctl_handler,
	},
	{
		.procname = "soft_crypt",
		.data = &hyperhold.enable_soft_crypt,
		.maxlen = sizeof(hyperhold.enable_soft_crypt),
		.mode = 0644,
		.proc_handler = crypto_sysctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{}
};
static struct ctl_table hp_kernel_table[] = {
	{
		.procname = "hyperhold",
		.mode = 0555,
		.child = hp_table,
	},
	{}
};
static struct ctl_table hp_sys_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = hp_kernel_table,
	},
	{}
};

bool is_hyperhold_enable(void)
{
	return hyperhold.enable;
}

static int __init hyperhold_init(void)
{
	strcpy(hyperhold.device_name, HP_DFLT_DEVICE);
	hyperhold.extent_size = HP_DFLT_EXT_SIZE;
	hyperhold.enable_soft_crypt = 1;
	mutex_init(&hyperhold.init_lock);
	hp_sysctl_header = register_sysctl_table(hp_sys_table);
	if (!hp_sysctl_header) {
		pr_err("register hyperhold sysctl table failed!\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit hyperhold_exit(void)
{
	unregister_sysctl_table(hp_sysctl_header);
	hyperhold_disable(true);
}

static struct hp_space *space_of(u32 eid)
{
	return &hyperhold.spc;
}

/* replace this func for multi devices */
static struct hp_device *device_of(u32 eid)
{
	return &hyperhold.dev;
}

/* replace this func for multi devices */
u32 hyperhold_nr_extent(void)
{
	if (!CHECK_INITED)
		return 0;

	return hyperhold.spc.nr_ext;
}
EXPORT_SYMBOL(hyperhold_nr_extent);

u32 hyperhold_extent_size(u32 eid)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return 0;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return 0;

	return spc->ext_size;
}
EXPORT_SYMBOL(hyperhold_extent_size);

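/*
 * Extents form one flat address space: extent @eid covers bytes
 * [eid * ext_size, (eid + 1) * ext_size). With the default 32 KiB extent
 * size, hyperhold_address(3, 0x100) = 3 * 0x8000 + 0x100 = 0x18100, and
 * hyperhold_addr_extent()/hyperhold_addr_offset() invert the mapping.
 */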
/* replace this func for multi devices */
long hyperhold_address(u32 eid, u32 offset)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return -EINVAL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return -EINVAL;
	if (!CHECK_BOUND(offset, 0, spc->ext_size - 1))
		return -EINVAL;

	return (u64)eid * spc->ext_size + offset;
}
EXPORT_SYMBOL(hyperhold_address);

/* replace this func for multi devices */
int hyperhold_addr_extent(u64 addr)
{
	struct hp_space *spc = NULL;
	u32 eid;

	if (!CHECK_INITED)
		return -EINVAL;
	eid = div_u64(addr, hyperhold.spc.ext_size);
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return -EINVAL;

	return eid;
}
EXPORT_SYMBOL(hyperhold_addr_extent);

/* replace this func for multi devices */
int hyperhold_addr_offset(u64 addr)
{
	if (!CHECK_INITED)
		return -EINVAL;

	return do_div(addr, hyperhold.spc.ext_size);
}
EXPORT_SYMBOL(hyperhold_addr_offset);

/* replace this func for multi devices */
int hyperhold_alloc_extent(void)
{
	if (!CHECK_ENABLE)
		return -EINVAL;

	return alloc_eid(&hyperhold.spc);
}
EXPORT_SYMBOL(hyperhold_alloc_extent);

void hyperhold_free_extent(u32 eid)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return;

	free_eid(spc, eid);
}
EXPORT_SYMBOL(hyperhold_free_extent);

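/*
 * Free @eid unless it still has an hpio in flight; in that case hand the
 * hpio a free_extent callback so the extent is released when its last
 * reference is dropped in hpio_put().
 */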
void hyperhold_should_free_extent(u32 eid)
{
	struct hpio *hpio = NULL;
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return;

	hpio = hpio_get(eid);
	if (!hpio) {
		free_eid(spc, eid);
		return;
	}
	hpio->free_extent = hyperhold_free_extent;
	hpio_put(hpio);
}
EXPORT_SYMBOL(hyperhold_should_free_extent);

/*
 * Allocate an hpio struct for reading/writing the extent at @eid; fill it
 * with newly allocated pages if @new_page. Returns NULL on failure.
 */
struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page)
{
	struct hpio *hpio = NULL;
	struct hp_space *spc;
	u32 nr_page;

	if (!CHECK_ENABLE)
		return NULL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return NULL;

	nr_page = spc->ext_size / PAGE_SIZE;
	hpio = hpio_alloc(nr_page, gfp, op, new_page);
	if (!hpio)
		goto err;
	hpio->eid = eid;

	return hpio;
err:
	hpio_free(hpio);

	return NULL;
}
EXPORT_SYMBOL(hyperhold_io_alloc);

460 
hyperhold_io_free(struct hpio * hpio)461 void hyperhold_io_free(struct hpio *hpio)
462 {
463 	if (!CHECK_INITED)
464 		return;
465 	if (!CHECK(hpio, "hpio is null!\n"))
466 		return;
467 
468 	hpio_free(hpio);
469 }
470 EXPORT_SYMBOL(hyperhold_io_free);
471 
472 /*
473  * find exist read hpio of the extent @eid in iotab and inc its refcnt,
474  * alloc a new hpio and insert it into iotab if there is no hpio for @eid
475  */
struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op)
{
	struct hp_space *spc = NULL;
	u32 nr_page;

	if (!CHECK_INITED)
		return NULL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return NULL;

	nr_page = spc->ext_size / PAGE_SIZE;
	return hpio_get_alloc(eid, nr_page, gfp, op);
}
EXPORT_SYMBOL(hyperhold_io_get);

bool hyperhold_io_put(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;

	return hpio_put(hpio);
}
EXPORT_SYMBOL(hyperhold_io_put);

/*
 * notify all threads waiting for this hpio
 */
void hyperhold_io_complete(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_complete(hpio);
}
EXPORT_SYMBOL(hyperhold_io_complete);

void hyperhold_io_wait(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_wait(hpio);
}
EXPORT_SYMBOL(hyperhold_io_wait);

bool hyperhold_io_success(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;

	return hpio_get_state(hpio) == HPIO_DONE;
}
EXPORT_SYMBOL(hyperhold_io_success);

int hyperhold_io_extent(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return -EINVAL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return -EINVAL;

	return hpio->eid;
}
EXPORT_SYMBOL(hyperhold_io_extent);

int hyperhold_io_operate(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return -EINVAL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return -EINVAL;

	return hpio->op;
}
EXPORT_SYMBOL(hyperhold_io_operate);

struct page *hyperhold_io_page(struct hpio *hpio, u32 index)
{
	if (!CHECK_INITED)
		return NULL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return NULL;
	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
		return NULL;

	return hpio->pages[index];
}
EXPORT_SYMBOL(hyperhold_io_page);

bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;
	if (!CHECK(page, "page is null!\n"))
		return false;
	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
		return false;

	get_page(page);
	atomic64_add(PAGE_SIZE, &mem_used);
	BUG_ON(hpio->pages[index]);
	hpio->pages[index] = page;

	return true;
}
EXPORT_SYMBOL(hyperhold_io_add_page);

u32 hyperhold_io_nr_page(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return 0;
	if (!CHECK(hpio, "hpio is null!\n"))
		return 0;

	return hpio->nr_page;
}
EXPORT_SYMBOL(hyperhold_io_nr_page);

void *hyperhold_io_private(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return NULL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return NULL;

	return hpio->private;
}
EXPORT_SYMBOL(hyperhold_io_private);

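/*
 * With a software crypto transform, writes bounce through a page holding
 * the ciphertext and reads bounce through a page that hp_endio_work()
 * decrypts afterwards; without one, the original page is reused with an
 * extra reference.
 */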
static struct page *get_encrypted_page(struct hp_device *dev, struct page *page, unsigned int op)
{
	struct page *encrypted_page = NULL;

	if (!dev->ctfm) {
		encrypted_page = page;
		get_page(encrypted_page);
		goto out;
	}

	encrypted_page = alloc_page(GFP_NOIO);
	if (!encrypted_page) {
		pr_err("alloc encrypted page failed!\n");
		goto out;
	}
	encrypted_page->index = page->index;

	/* just alloc a new page for read */
	if (!op_is_write(op))
		goto out;

	/* encrypt page for write */
	if (soft_crypt_page(dev->ctfm, encrypted_page, page, HP_DEV_ENCRYPT)) {
		put_page(encrypted_page);
		encrypted_page = NULL;
	}
out:
	return encrypted_page;
}

static void put_encrypted_pages(struct bio *bio)
{
	struct bio_vec *bv = NULL;
	struct bvec_iter_all iter;

	bio_for_each_segment_all(bv, bio, iter)
		put_page(bv->bv_page);
}

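/*
 * Deferred completion work: for reads done with soft crypto, decrypt each
 * bounce page back into the corresponding hpio page, then release the bio
 * pages and run the caller's endio callback.
 */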
static void hp_endio_work(struct work_struct *work)
{
	struct hpio *hpio = container_of(work, struct hpio, endio_work);
	struct hp_device *dev = NULL;
	struct bio_vec *bv = NULL;
	struct bvec_iter_all iter;
	struct page *page = NULL;
	u32 ext_size;
	sector_t sec;
	int i;

	if (op_is_write(hpio->op))
		goto endio;
	ext_size = space_of(hpio->eid)->ext_size;
	dev = device_of(hpio->eid);
	sec = hpio->eid * ext_size / dev->sec_size;
	i = 0;
	bio_for_each_segment_all(bv, hpio->bio, iter) {
		page = bv->bv_page;
		BUG_ON(i >= hpio->nr_page);
		BUG_ON(!hpio->pages[i]);
		if (dev->ctfm)
			BUG_ON(soft_crypt_page(dev->ctfm, hpio->pages[i], page, HP_DEV_DECRYPT));
		sec += PAGE_SIZE / dev->sec_size;
		i++;
	}
endio:
	put_encrypted_pages(hpio->bio);
	bio_put(hpio->bio);
	if (hpio->endio)
		hpio->endio(hpio);
}

static void hpio_endio(struct bio *bio)
{
	struct hpio *hpio = bio->bi_private;
	struct workqueue_struct *wq = NULL;

	pr_info("hpio %p for eid %u returned %d.\n",
			hpio, hpio->eid, bio->bi_status);
	hpio_set_state(hpio, bio->bi_status ? HPIO_FAIL : HPIO_DONE);
	wq = op_is_write(hpio->op) ? hyperhold.write_wq : hyperhold.read_wq;
	queue_work(wq, &hpio->endio_work);
	atomic64_sub(sizeof(struct bio), &mem_used);
}

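/*
 * Build and submit the bio for @hpio: map the extent to its starting
 * sector, add the (possibly encrypted) pages, and account the bio in
 * mem_used until hpio_endio() runs.
 */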
static int hpio_submit(struct hpio *hpio)
{
	struct hp_device *dev = NULL;
	struct bio *bio = NULL;
	struct page *page = NULL;
	u32 ext_size;
	sector_t sec;
	int i;

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	if (!bio) {
		pr_err("bio alloc failed!\n");
		return -ENOMEM;
	}
	atomic64_add(sizeof(struct bio), &mem_used);

	dev = device_of(hpio->eid);
	bio_set_op_attrs(bio, hpio->op, 0);
	bio_set_dev(bio, dev->bdev);

	ext_size = space_of(hpio->eid)->ext_size;
	sec = div_u64((u64)hpio->eid * ext_size, dev->sec_size);
	bio->bi_iter.bi_sector = sec;
	for (i = 0; i < hpio->nr_page; i++) {
		if (!hpio->pages[i])
			break;
		hpio->pages[i]->index = sec;
		page = get_encrypted_page(dev, hpio->pages[i], hpio->op);
		if (!page)
			goto err;
		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
			put_page(page);
			goto err;
		}
		sec += PAGE_SIZE / dev->sec_size;
	}

	if (dev->blk_key)
		inline_crypt_bio(dev->blk_key, bio);
	bio->bi_private = hpio;
	bio->bi_end_io = hpio_endio;
	hpio->bio = bio;
	submit_bio(bio);
	pr_info("submit hpio %p for eid %u.\n", hpio, hpio->eid);

	return 0;
err:
	put_encrypted_pages(bio);
	bio_put(bio);
	atomic64_sub(sizeof(struct bio), &mem_used);
	return -EIO;
}

static int rw_extent_async(struct hpio *hpio, hp_endio endio, void *priv, unsigned int op)
{
	int ret = 0;

	if (!hpio_change_state(hpio, HPIO_INIT, HPIO_SUBMIT))
		return -EAGAIN;

	hpio->private = priv;
	hpio->endio = endio;
	INIT_WORK(&hpio->endio_work, hp_endio_work);

	ret = hpio_submit(hpio);
	if (ret) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
	}

	return ret;
}

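/*
 * A minimal write sketch (hypothetical caller, not part of this file);
 * my_endio/my_priv stand in for a caller-supplied completion hook and
 * page for a caller-owned page:
 *
 *	int eid = hyperhold_alloc_extent();
 *	struct hpio *hpio;
 *
 *	if (eid < 0)
 *		return eid;
 *	hpio = hyperhold_io_alloc(eid, GFP_KERNEL, REQ_OP_WRITE, false);
 *	if (hpio && hyperhold_io_add_page(hpio, 0, page))
 *		hyperhold_write_async(hpio, my_endio, my_priv);
 */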
int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv)
{
	if (!CHECK_ENABLE) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
		return -EINVAL;
	}

	BUG_ON(!op_is_write(hpio->op));

	return rw_extent_async(hpio, endio, priv, REQ_OP_WRITE);
}
EXPORT_SYMBOL(hyperhold_write_async);

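/*
 * A minimal synchronous read sketch (hypothetical caller, not part of
 * this file), waiting in place instead of passing an endio callback:
 *
 *	struct hpio *hpio = hyperhold_io_get(eid, GFP_KERNEL, REQ_OP_READ);
 *	int err;
 *
 *	if (!hpio)
 *		return -ENOMEM;
 *	err = hyperhold_read_async(hpio, NULL, NULL);
 *	if (!err || err == -EAGAIN)	/* -EAGAIN: already submitted */
 *		hyperhold_io_wait(hpio);
 *	err = hyperhold_io_success(hpio) ? 0 : -EIO;
 *	hyperhold_io_put(hpio);
 *	return err;
 */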
int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv)
{
	if (!CHECK_INITED) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
		return -EINVAL;
	}

	if (op_is_write(hpio->op))
		return -EAGAIN;

	return rw_extent_async(hpio, endio, priv, REQ_OP_READ);
}
EXPORT_SYMBOL(hyperhold_read_async);


module_init(hyperhold_init)
module_exit(hyperhold_exit)