// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/hyperhold/hp_device.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#define pr_fmt(fmt) "[HYPERHOLD]" fmt

#include <linux/random.h>
#include <linux/blk-crypto.h>

#include "hp_device.h"

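/*
 * AES-256-XTS takes a 512-bit (64-byte) key: two 256-bit AES keys, one for
 * the data and one for the XTS tweak. The per-page IV/DUN is 128 bits.
 */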
#define HP_CIPHER_MODE BLK_ENCRYPTION_MODE_AES_256_XTS
#define HP_CIPHER_NAME "xts(aes)"
#define HP_KEY_SIZE (64)
#define HP_IV_SIZE (16)

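/*
 * IV/DUN container: only the first 64 bits are used, holding either the page
 * index (software path) or the starting sector (inline path); the rest is zero.
 */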
union hp_iv {
	__le64 index;
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
};

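/*
 * Undo bind_bdev(): restore the device's original block size, drop the
 * exclusive block device reference and close the backing file.
 */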
void unbind_bdev(struct hp_device *dev)
{
	int ret;

	if (!dev->bdev)
		goto close;
	if (!dev->old_block_size)
		goto put;
	ret = set_blocksize(dev->bdev, dev->old_block_size);
	if (ret)
		pr_err("set old block size %d failed, err = %d!\n",
				dev->old_block_size, ret);
	dev->old_block_size = 0;
put:
	blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	dev->bdev = NULL;
close:
	if (dev->filp)
		filp_close(dev->filp, NULL);
	dev->filp = NULL;

	pr_info("hyperhold bdev unbound.\n");
}

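/*
 * Open @name, check that it really is a block device, claim it exclusively,
 * switch it to PAGE_SIZE blocks and record its size. Returns true on success;
 * on any failure the partial setup is rolled back via unbind_bdev().
 */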
bool bind_bdev(struct hp_device *dev, const char *name)
{
	struct inode *inode = NULL;
	int ret;

	dev->filp = filp_open(name, O_RDWR | O_LARGEFILE, 0);
	if (IS_ERR(dev->filp)) {
		pr_err("open file %s failed, err = %ld!\n", name, PTR_ERR(dev->filp));
		dev->filp = NULL;
		goto err;
	}
	inode = dev->filp->f_mapping->host;
	if (!S_ISBLK(inode->i_mode)) {
		pr_err("%s is not a block device!\n", name);
		goto err;
	}
	dev->bdev = blkdev_get_by_dev(inode->i_rdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, dev);
	if (IS_ERR(dev->bdev)) {
		ret = PTR_ERR(dev->bdev);
		dev->bdev = NULL;
		pr_err("get blkdev %s failed, err = %d!\n", name, ret);
		goto err;
	}
	dev->old_block_size = block_size(dev->bdev);
	ret = set_blocksize(dev->bdev, PAGE_SIZE);
	if (ret) {
		pr_err("set %s block size failed, err = %d!\n", name, ret);
		goto err;
	}
	dev->dev_size = (u64)i_size_read(inode);
	dev->sec_size = SECTOR_SIZE;

	pr_info("hyperhold bind bdev %s of size %llu / %u succ.\n",
			name, dev->dev_size, dev->sec_size);

	return true;
err:
	unbind_bdev(dev);

	return false;
}

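/*
 * Encrypt or decrypt a single page with the software skcipher. The page index
 * is used as the XTS IV so every page gets a distinct tweak; the request is
 * run synchronously via crypto_wait_req().
 */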
int soft_crypt_page(struct crypto_skcipher *ctfm, struct page *dst_page,
		    struct page *src_page, unsigned int op)
{
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	int ret = 0;
	union hp_iv iv;

	memset(&iv, 0, sizeof(union hp_iv));
	iv.index = cpu_to_le64(src_page->index);

	req = skcipher_request_alloc(ctfm, GFP_NOIO);
	if (!req) {
		pr_err("alloc skcipher request failed!\n");
		return -ENOMEM;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			crypto_req_done, &wait);
	sg_init_table(&dst, 1);
	sg_set_page(&dst, dst_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &iv);
	if (op == HP_DEV_ENCRYPT)
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	else if (op == HP_DEV_DECRYPT)
		ret = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		BUG();

	skcipher_request_free(req);

	if (ret)
		pr_err("%scrypt failed!\n", op == HP_DEV_ENCRYPT ? "en" : "de");

	return ret;
}

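/*
 * Allocate an "xts(aes)" skcipher transform and program it with the 64-byte
 * key. Returns the transform, or NULL on failure.
 */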
static struct crypto_skcipher *soft_crypto_init(const u8 *key)
{
	char *cipher = HP_CIPHER_NAME;
	u32 key_len = HP_KEY_SIZE;
	struct crypto_skcipher *ctfm = NULL;
	int ret;

	ctfm = crypto_alloc_skcipher(cipher, 0, 0);
	if (IS_ERR(ctfm)) {
		pr_err("alloc ctfm failed, ret = %ld!\n", PTR_ERR(ctfm));
		ctfm = NULL;
		goto err;
	}
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	ret = crypto_skcipher_setkey(ctfm, key, key_len);
	if (ret) {
		pr_err("ctfm setkey failed, ret = %d!\n", ret);
		goto err;
	}

	return ctfm;
err:
	if (ctfm)
		crypto_free_skcipher(ctfm);

	return NULL;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
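/*
 * Attach an inline-encryption context to @bio, using the starting sector as
 * the data unit number (DUN).
 */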
void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio)
{
	union hp_iv iv;

	memset(&iv, 0, sizeof(union hp_iv));
	iv.index = cpu_to_le64(bio->bi_iter.bi_sector);

	bio_crypt_set_ctx(bio, blk_key, iv.dun, GFP_NOIO);
}

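/*
 * Build a blk-crypto key for AES-256-XTS with a PAGE_SIZE data unit and an
 * 8-byte DUN. Returns the key, or NULL on failure.
 */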
static struct blk_crypto_key *inline_crypto_init(const u8 *key)
{
	struct blk_crypto_key *blk_key = NULL;
	u32 dun_bytes = HP_IV_SIZE - sizeof(__le64);
	int ret;

	blk_key = kzalloc(sizeof(struct blk_crypto_key), GFP_KERNEL);
	if (!blk_key) {
		pr_err("blk key alloc failed!\n");
		goto err;
	}
	ret = blk_crypto_init_key(blk_key, key, HP_CIPHER_MODE, dun_bytes, PAGE_SIZE);
	if (ret) {
		pr_err("blk key init failed, ret = %d!\n", ret);
		goto err;
	}

	return blk_key;
err:
	if (blk_key)
		kfree_sensitive(blk_key);

	return NULL;
}
#else
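/* Stubs used when inline (blk-crypto) encryption support is not compiled in. */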
void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio) {}
static struct blk_crypto_key *inline_crypto_init(const u8 *key)
{
	pr_err("CONFIG_BLK_INLINE_ENCRYPTION is not enabled!\n");
	return NULL;
}
#endif

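/*
 * Generate a random per-device key and set up either the software skcipher
 * or the inline blk-crypto key for it. The raw key is wiped from the stack
 * before returning. Returns true on success.
 */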
bool crypto_init(struct hp_device *dev, bool soft)
{
	u8 key[HP_KEY_SIZE];
	bool ret = false;

	get_random_bytes(key, HP_KEY_SIZE);
	if (soft) {
		dev->ctfm = soft_crypto_init(key);
		ret = dev->ctfm != NULL;
	} else {
		dev->blk_key = inline_crypto_init(key);
		ret = dev->blk_key != NULL;
		if (ret)
			pr_warn("soft crypt has been turned off, now applying hard crypt!\n");
	}
	memzero_explicit(key, HP_KEY_SIZE);

	return ret;
}

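/*
 * Free whichever crypto context crypto_init() set up for this device.
 */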
void crypto_deinit(struct hp_device *dev)
{
	if (dev->ctfm) {
		crypto_free_skcipher(dev->ctfm);
		dev->ctfm = NULL;
	}
	if (dev->blk_key) {
		kfree_sensitive(dev->blk_key);
		dev->blk_key = NULL;
	}
}