// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/hyperhold/hp_core.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#define pr_fmt(fmt) "[HYPERHOLD]" fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>

#include "hyperhold.h"
#include "hp_device.h"
#include "hp_space.h"
#include "hp_iotab.h"

#define HP_DFLT_DEVICE "/dev/by-name/hyperhold"
#define HP_DFLT_EXT_SIZE (1 << 15)
#define HP_DEV_NAME_LEN 256
#define HP_STATE_LEN 10

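/*
 * CHECK() evaluates @cond and, when it is false, logs the given message
 * and yields false (via the comma operator), so callers can bail out
 * with a single "if (!CHECK(...))" test. CHECK_BOUND, CHECK_INITED and
 * CHECK_ENABLE below are convenience wrappers built on it.
 */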
#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
		"%s %u out of bounds %u ~ %u!\n", #var, (var), (min), (max))
#define CHECK_INITED CHECK(hyperhold.inited, "hyperhold is not enabled!\n")
#define CHECK_ENABLE (CHECK_INITED && CHECK(hyperhold.enable, "hyperhold is readonly!\n"))

struct hyperhold {
	bool enable;
	bool inited;

	char device_name[HP_DEV_NAME_LEN];
	u32 extent_size;
	u32 enable_soft_crypt;

	struct hp_device dev;
	struct hp_space spc;

	struct workqueue_struct *read_wq;
	struct workqueue_struct *write_wq;

	struct mutex init_lock;
};

struct hyperhold hyperhold;

atomic64_t mem_used = ATOMIC64_INIT(0);
#ifdef CONFIG_HYPERHOLD_DEBUG
/*
 * Return the memory overhead of the hyperhold module.
 */
u64 hyperhold_memory_used(void)
{
	return atomic64_read(&mem_used) + hpio_memory() + space_memory();
}
#endif

void hyperhold_disable(bool force)
{
	if (!CHECK_INITED)
		return;
	if (!force && !CHECK_ENABLE)
		return;

	mutex_lock(&hyperhold.init_lock);
	hyperhold.enable = false;
	if (!wait_for_space_empty(&hyperhold.spc, force))
		goto out;
	hyperhold.inited = false;
	wait_for_iotab_empty();
	if (hyperhold.read_wq)
		destroy_workqueue(hyperhold.read_wq);
	if (hyperhold.write_wq)
		destroy_workqueue(hyperhold.write_wq);
	/* clear stale pointers so a later failed enable cannot destroy them again */
	hyperhold.read_wq = NULL;
	hyperhold.write_wq = NULL;
	deinit_space(&hyperhold.spc);
	crypto_deinit(&hyperhold.dev);
	unbind_bdev(&hyperhold.dev);
out:
	if (hyperhold.inited)
		pr_info("hyperhold is disabled, read only.\n");
	else
		pr_info("hyperhold is totally disabled!\n");
	mutex_unlock(&hyperhold.init_lock);
}
EXPORT_SYMBOL(hyperhold_disable);

void hyperhold_enable(void)
{
	bool enable = true;

	if (hyperhold.inited)
		goto out;

	mutex_lock(&hyperhold.init_lock);
	if (hyperhold.inited)
		goto unlock;
	if (!bind_bdev(&hyperhold.dev, hyperhold.device_name))
		goto err;
	if (!crypto_init(&hyperhold.dev, hyperhold.enable_soft_crypt))
		goto err;
	if (!init_space(&hyperhold.spc, hyperhold.dev.dev_size, hyperhold.extent_size))
		goto err;
	hyperhold.read_wq = alloc_workqueue("hyperhold_read", WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!hyperhold.read_wq)
		goto err;
	hyperhold.write_wq = alloc_workqueue("hyperhold_write", 0, 0);
	if (!hyperhold.write_wq)
		goto err;
	hyperhold.inited = true;
	goto unlock;
err:
	if (hyperhold.read_wq)
		destroy_workqueue(hyperhold.read_wq);
	if (hyperhold.write_wq)
		destroy_workqueue(hyperhold.write_wq);
	hyperhold.read_wq = NULL;
	hyperhold.write_wq = NULL;
	deinit_space(&hyperhold.spc);
	crypto_deinit(&hyperhold.dev);
	unbind_bdev(&hyperhold.dev);
	enable = false;
unlock:
	mutex_unlock(&hyperhold.init_lock);
out:
	if (enable) {
		hyperhold.enable = true;
		pr_info("hyperhold is enabled.\n");
	} else {
		hyperhold.enable = false;
		pr_err("hyperhold enable failed!\n");
	}
}
EXPORT_SYMBOL(hyperhold_enable);

static int enable_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	if (write) {
		if (!strcmp(buffer, "enable\n"))
			hyperhold_enable();
		else if (!strcmp(buffer, "disable\n"))
			hyperhold_disable(false);
		else if (!strcmp(buffer, "force_disable\n"))
			hyperhold_disable(true);
	} else {
		if (*lenp < HP_STATE_LEN || *ppos) {
			*lenp = 0;
			return 0;
		}
		if (hyperhold.enable)
			strcpy(buffer, "enable\n");
		else if (hyperhold.inited)
			strcpy(buffer, "readonly\n");
		else
			strcpy(buffer, "disable\n");
		*lenp = strlen(buffer);
		*ppos += *lenp;
#ifdef CONFIG_HYPERHOLD_DEBUG
		pr_info("hyperhold memory overhead = %llu.\n", hyperhold_memory_used());
#endif
	}
	return 0;
}

static int device_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_dostring(table, write, buffer, lenp, ppos);
	if (write && !ret) {
		hyperhold.enable_soft_crypt = 1;
		pr_info("device changed, enabling soft crypt by default.\n");
	}
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static int extent_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_douintvec(table, write, buffer, lenp, ppos);
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static int crypto_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static struct ctl_table_header *hp_sysctl_header;
static struct ctl_table hp_table[] = {
	{
		.procname = "enable",
		.mode = 0644,
		.proc_handler = enable_sysctl_handler,
	},
	{
		.procname = "device",
		.data = &hyperhold.device_name,
		.maxlen = sizeof(hyperhold.device_name),
		.mode = 0644,
		.proc_handler = device_sysctl_handler,
	},
	{
		.procname = "extent_size",
		.data = &hyperhold.extent_size,
		.maxlen = sizeof(hyperhold.extent_size),
		.mode = 0644,
		.proc_handler = extent_sysctl_handler,
	},
	{
		.procname = "soft_crypt",
		.data = &hyperhold.enable_soft_crypt,
		.maxlen = sizeof(hyperhold.enable_soft_crypt),
		.mode = 0644,
		.proc_handler = crypto_sysctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{}
};
static struct ctl_table hp_kernel_table[] = {
	{
		.procname = "hyperhold",
		.mode = 0555,
		.child = hp_table,
	},
	{}
};
static struct ctl_table hp_sys_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = hp_kernel_table,
	},
	{}
};
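
/*
 * The tables above expose /proc/sys/kernel/hyperhold/{enable, device,
 * extent_size, soft_crypt}. An illustrative shell session (the device
 * path is just an example):
 *
 *	echo /dev/by-name/hyperhold > /proc/sys/kernel/hyperhold/device
 *	echo enable > /proc/sys/kernel/hyperhold/enable
 *	cat /proc/sys/kernel/hyperhold/enable   # "enable", "readonly" or "disable"
 *	echo force_disable > /proc/sys/kernel/hyperhold/enable
 */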

bool is_hyperhold_enable(void)
{
	return hyperhold.enable;
}

static int __init hyperhold_init(void)
{
	strcpy(hyperhold.device_name, HP_DFLT_DEVICE);
	hyperhold.extent_size = HP_DFLT_EXT_SIZE;
	hyperhold.enable_soft_crypt = 1;
	mutex_init(&hyperhold.init_lock);
	hp_sysctl_header = register_sysctl_table(hp_sys_table);
	if (!hp_sysctl_header) {
		pr_err("register hyperhold sysctl table failed!\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit hyperhold_exit(void)
{
	unregister_sysctl_table(hp_sysctl_header);
	hyperhold_disable(true);
}

static struct hp_space *space_of(u32 eid)
{
	return &hyperhold.spc;
}

/* replace this func to support multiple devices */
static struct hp_device *device_of(u32 eid)
{
	return &hyperhold.dev;
}

/* replace this func to support multiple devices */
u32 hyperhold_nr_extent(void)
{
	if (!CHECK_INITED)
		return 0;

	return hyperhold.spc.nr_ext;
}
EXPORT_SYMBOL(hyperhold_nr_extent);

u32 hyperhold_extent_size(u32 eid)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return 0;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return 0;

	return spc->ext_size;
}
EXPORT_SYMBOL(hyperhold_extent_size);

/* replace this func to support multiple devices */
long hyperhold_address(u32 eid, u32 offset)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return -EINVAL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return -EINVAL;
	if (!CHECK_BOUND(offset, 0, spc->ext_size - 1))
		return -EINVAL;

	return (u64)eid * spc->ext_size + offset;
}
EXPORT_SYMBOL(hyperhold_address);

/* replace this func to support multiple devices */
int hyperhold_addr_extent(u64 addr)
{
	struct hp_space *spc = NULL;
	u32 eid;

	if (!CHECK_INITED)
		return -EINVAL;
	eid = div_u64(addr, hyperhold.spc.ext_size);
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return -EINVAL;

	return eid;
}
EXPORT_SYMBOL(hyperhold_addr_extent);

/* replace this func to support multiple devices */
int hyperhold_addr_offset(u64 addr)
{
	if (!CHECK_INITED)
		return -EINVAL;

	return do_div(addr, hyperhold.spc.ext_size);
}
EXPORT_SYMBOL(hyperhold_addr_offset);
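
/*
 * A worked example of the linear addressing above (a sketch, using the
 * default extent size HP_DFLT_EXT_SIZE = 32768 bytes): for eid = 2 and
 * offset = 100, hyperhold_address() yields 2 * 32768 + 100 = 65636;
 * feeding that back, hyperhold_addr_extent(65636) recovers eid 2 and
 * hyperhold_addr_offset(65636) recovers offset 100.
 */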

/* replace this func to support multiple devices */
int hyperhold_alloc_extent(void)
{
	if (!CHECK_ENABLE)
		return -EINVAL;

	return alloc_eid(&hyperhold.spc);
}
EXPORT_SYMBOL(hyperhold_alloc_extent);

void hyperhold_free_extent(u32 eid)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return;

	free_eid(spc, eid);
}
EXPORT_SYMBOL(hyperhold_free_extent);

void hyperhold_should_free_extent(u32 eid)
{
	struct hpio *hpio = NULL;
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return;

	hpio = hpio_get(eid);
	if (!hpio) {
		free_eid(spc, eid);
		return;
	}
	hpio->free_extent = hyperhold_free_extent;
	hpio_put(hpio);
}
EXPORT_SYMBOL(hyperhold_should_free_extent);

/*
 * Allocate an hpio struct for reading/writing the extent at @eid; fill
 * it with newly allocated pages if @new_page. Returns NULL on failure.
 */
struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page)
{
	struct hpio *hpio = NULL;
	struct hp_space *spc;
	u32 nr_page;

	if (!CHECK_ENABLE)
		return NULL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return NULL;

	nr_page = spc->ext_size / PAGE_SIZE;
	hpio = hpio_alloc(nr_page, gfp, op, new_page);
	if (!hpio)
		goto err;
	hpio->eid = eid;

	return hpio;
err:
	hpio_free(hpio);

	return NULL;
}
EXPORT_SYMBOL(hyperhold_io_alloc);
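
/*
 * A minimal write-path sketch built on the API above (illustrative
 * only; my_endio and my_priv are hypothetical caller-side names, and
 * error cleanup is elided):
 *
 *	int eid = hyperhold_alloc_extent();
 *	struct hpio *hpio;
 *
 *	if (eid < 0)
 *		return eid;
 *	hpio = hyperhold_io_alloc(eid, GFP_NOIO, REQ_OP_WRITE, true);
 *	if (!hpio)
 *		return -ENOMEM;
 *	// copy the payload into hyperhold_io_page(hpio, 0 .. nr_page - 1)
 *	return hyperhold_write_async(hpio, my_endio, my_priv);
 */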

void hyperhold_io_free(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_free(hpio);
}
EXPORT_SYMBOL(hyperhold_io_free);

/*
 * Find an existing read hpio for extent @eid in the iotab and increase
 * its refcount; if there is no hpio for @eid, allocate a new one and
 * insert it into the iotab.
 */
struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op)
{
	struct hp_space *spc = NULL;
	u32 nr_page;

	if (!CHECK_INITED)
		return NULL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return NULL;

	nr_page = spc->ext_size / PAGE_SIZE;
	return hpio_get_alloc(eid, nr_page, gfp, op);
}
EXPORT_SYMBOL(hyperhold_io_get);

bool hyperhold_io_put(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;

	return hpio_put(hpio);
}
EXPORT_SYMBOL(hyperhold_io_put);

/*
 * Notify all threads waiting for this hpio.
 */
void hyperhold_io_complete(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_complete(hpio);
}
EXPORT_SYMBOL(hyperhold_io_complete);

void hyperhold_io_wait(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_wait(hpio);
}
EXPORT_SYMBOL(hyperhold_io_wait);

bool hyperhold_io_success(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;

	return hpio_get_state(hpio) == HPIO_DONE;
}
EXPORT_SYMBOL(hyperhold_io_success);

int hyperhold_io_extent(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return -EINVAL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return -EINVAL;

	return hpio->eid;
}
EXPORT_SYMBOL(hyperhold_io_extent);

int hyperhold_io_operate(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return -EINVAL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return -EINVAL;

	return hpio->op;
}
EXPORT_SYMBOL(hyperhold_io_operate);

struct page *hyperhold_io_page(struct hpio *hpio, u32 index)
{
	if (!CHECK_INITED)
		return NULL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return NULL;
	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
		return NULL;

	return hpio->pages[index];
}
EXPORT_SYMBOL(hyperhold_io_page);

bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;
	if (!CHECK(page, "page is null!\n"))
		return false;
	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
		return false;

	get_page(page);
	atomic64_add(PAGE_SIZE, &mem_used);
	BUG_ON(hpio->pages[index]);
	hpio->pages[index] = page;

	return true;
}
EXPORT_SYMBOL(hyperhold_io_add_page);

u32 hyperhold_io_nr_page(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return 0;
	if (!CHECK(hpio, "hpio is null!\n"))
		return 0;

	return hpio->nr_page;
}
EXPORT_SYMBOL(hyperhold_io_nr_page);

void *hyperhold_io_private(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return NULL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return NULL;

	return hpio->private;
}
EXPORT_SYMBOL(hyperhold_io_private);

static struct page *get_encrypted_page(struct hp_device *dev, struct page *page, unsigned int op)
{
	struct page *encrypted_page = NULL;

	if (!dev->ctfm) {
		encrypted_page = page;
		get_page(encrypted_page);
		goto out;
	}

	encrypted_page = alloc_page(GFP_NOIO);
	if (!encrypted_page) {
		pr_err("alloc encrypted page failed!\n");
		goto out;
	}
	encrypted_page->index = page->index;

	/* just alloc a new page for read */
	if (!op_is_write(op))
		goto out;

	/* encrypt page for write */
	if (soft_crypt_page(dev->ctfm, encrypted_page, page, HP_DEV_ENCRYPT)) {
		put_page(encrypted_page);
		encrypted_page = NULL;
	}
out:
	return encrypted_page;
}

static void put_encrypted_pages(struct bio *bio)
{
	struct bio_vec *bv = NULL;
	struct bvec_iter_all iter;

	bio_for_each_segment_all(bv, bio, iter)
		put_page(bv->bv_page);
}

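/*
 * Endio work: runs in the read or write workqueue after the bio
 * completes. For reads with soft crypt enabled, it decrypts each bounce
 * page back into the caller's pages; it then drops the bio's page
 * references, releases the bio and invokes the caller's endio callback,
 * if any.
 */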
static void hp_endio_work(struct work_struct *work)
{
	struct hpio *hpio = container_of(work, struct hpio, endio_work);
	struct hp_device *dev = NULL;
	struct bio_vec *bv = NULL;
	struct bvec_iter_all iter;
	struct page *page = NULL;
	u32 ext_size;
	sector_t sec;
	int i;

	if (op_is_write(hpio->op))
		goto endio;
	ext_size = space_of(hpio->eid)->ext_size;
	dev = device_of(hpio->eid);
	sec = hpio->eid * ext_size / dev->sec_size;
	i = 0;
	bio_for_each_segment_all(bv, hpio->bio, iter) {
		page = bv->bv_page;
		BUG_ON(i >= hpio->nr_page);
		BUG_ON(!hpio->pages[i]);
		if (dev->ctfm)
			BUG_ON(soft_crypt_page(dev->ctfm, hpio->pages[i], page, HP_DEV_DECRYPT));
		sec += PAGE_SIZE / dev->sec_size;
		i++;
	}
endio:
	put_encrypted_pages(hpio->bio);
	bio_put(hpio->bio);
	if (hpio->endio)
		hpio->endio(hpio);
}

static void hpio_endio(struct bio *bio)
{
	struct hpio *hpio = bio->bi_private;
	struct workqueue_struct *wq = NULL;

	pr_info("hpio %p for eid %u returned %d.\n",
			hpio, hpio->eid, bio->bi_status);
	hpio_set_state(hpio, bio->bi_status ? HPIO_FAIL : HPIO_DONE);
	wq = op_is_write(hpio->op) ? hyperhold.write_wq : hyperhold.read_wq;
	queue_work(wq, &hpio->endio_work);
	atomic64_sub(sizeof(struct bio), &mem_used);
}

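/*
 * Build and submit a bio covering the whole extent: map the extent id
 * to a starting sector, attach each hpio page (via an encrypted bounce
 * page when soft crypt is on), and hand the bio to the block layer;
 * hpio_endio() above picks up the completion.
 */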
static int hpio_submit(struct hpio *hpio)
{
	struct hp_device *dev = NULL;
	struct bio *bio = NULL;
	struct page *page = NULL;
	u32 ext_size;
	sector_t sec;
	int i;

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	if (!bio) {
		pr_err("bio alloc failed!\n");
		return -ENOMEM;
	}
	atomic64_add(sizeof(struct bio), &mem_used);

	dev = device_of(hpio->eid);
	bio_set_op_attrs(bio, hpio->op, 0);
	bio_set_dev(bio, dev->bdev);

	ext_size = space_of(hpio->eid)->ext_size;
	sec = div_u64((u64)hpio->eid * ext_size, dev->sec_size);
	bio->bi_iter.bi_sector = sec;
	for (i = 0; i < hpio->nr_page; i++) {
		if (!hpio->pages[i])
			break;
		hpio->pages[i]->index = sec;
		page = get_encrypted_page(dev, hpio->pages[i], hpio->op);
		if (!page)
			goto err;
		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
			put_page(page);
			goto err;
		}
		sec += PAGE_SIZE / dev->sec_size;
	}

	if (dev->blk_key)
		inline_crypt_bio(dev->blk_key, bio);
	bio->bi_private = hpio;
	bio->bi_end_io = hpio_endio;
	hpio->bio = bio;
	submit_bio(bio);
	pr_info("submit hpio %p for eid %u.\n", hpio, hpio->eid);

	return 0;
err:
	put_encrypted_pages(bio);
	bio_put(bio);
	atomic64_sub(sizeof(struct bio), &mem_used);
	return -EIO;
}

static int rw_extent_async(struct hpio *hpio, hp_endio endio, void *priv, unsigned int op)
{
	int ret = 0;

	if (!hpio_change_state(hpio, HPIO_INIT, HPIO_SUBMIT))
		return -EAGAIN;

	hpio->private = priv;
	hpio->endio = endio;
	INIT_WORK(&hpio->endio_work, hp_endio_work);

	ret = hpio_submit(hpio);
	if (ret) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
	}

	return ret;
}

int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv)
{
	if (!CHECK_ENABLE) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
		return -EINVAL;
	}

	BUG_ON(!op_is_write(hpio->op));

	return rw_extent_async(hpio, endio, priv, REQ_OP_WRITE);
}
EXPORT_SYMBOL(hyperhold_write_async);

int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv)
{
	if (!CHECK_INITED) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
		return -EINVAL;
	}

	if (op_is_write(hpio->op))
		return -EAGAIN;

	return rw_extent_async(hpio, endio, priv, REQ_OP_READ);
}
EXPORT_SYMBOL(hyperhold_read_async);
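
/*
 * A minimal synchronous read sketch built on the API above (illustrative
 * only; error handling is elided):
 *
 *	struct hpio *hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
 *	int ret;
 *
 *	if (!hpio)
 *		return -ENOMEM;
 *	ret = hyperhold_read_async(hpio, NULL, NULL);
 *	if (ret && ret != -EAGAIN)	// -EAGAIN: already submitted by another getter
 *		goto put;
 *	hyperhold_io_wait(hpio);
 *	// on hyperhold_io_success(), consume hyperhold_io_page(hpio, 0 .. nr_page - 1)
 * put:
 *	hyperhold_io_put(hpio);
 */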

module_init(hyperhold_init)
module_exit(hyperhold_exit)