// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include "blk.h"

static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;

void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}

static void __bio_integrity_free(struct bio_set *bs,
				 struct bio_integrity_payload *bip)
{
	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
		if (bip->bip_vec)
			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
				  bip->bip_max_vcnt);
		mempool_free(bip, &bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}
}

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio: bio to attach integrity metadata to
 * @gfp_mask: Memory allocation mask
 * @nr_vecs: Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata. nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned inline_vecs;

	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	if (nr_vecs > inline_vecs) {
		bip->bip_max_vcnt = nr_vecs;
		bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
					  &bip->bip_max_vcnt, gfp_mask);
		if (!bip->bip_vec)
			goto err;
	} else {
		bip->bip_vec = bip->bip_inline_vecs;
		bip->bip_max_vcnt = inline_vecs;
	}

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
	__bio_integrity_free(bs, bip);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
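
/*
 * Usage sketch (illustrative, not called from this file): callers must
 * check the return value with IS_ERR() rather than against NULL.  Assuming
 * a caller that needs a single integrity vector:
 *
 *	struct bio_integrity_payload *bip;
 *
 *	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
 *	if (IS_ERR(bip))
 *		return PTR_ERR(bip);
 *	// bio->bi_integrity now points to bip and REQ_INTEGRITY is set;
 *	// pages are attached next with bio_integrity_add_page().
 */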

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio: bio containing bip to be freed
 *
 * Description: Used to free the integrity portion of a bio. Usually
 * called from bio_free().
 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_set *bs = bio->bi_pool;

	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
		kfree(bvec_virt(bip->bip_vec));

	__bio_integrity_free(bs, bip);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio: bio to update
 * @page: page containing integrity metadata
 * @len: number of bytes of integrity metadata in page
 * @offset: start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_vec *iv;

	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
		printk(KERN_ERR "%s: bip_vec full\n", __func__);
		return 0;
	}

	iv = bip->bip_vec + bip->bip_vcnt;

	if (bip->bip_vcnt &&
	    bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue,
			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
		return 0;

	iv->bv_page = page;
	iv->bv_len = len;
	iv->bv_offset = offset;
	bip->bip_vcnt++;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);
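
/*
 * Usage sketch (illustrative): after bio_integrity_alloc(), protection
 * buffer pages are attached one at a time.  The return value is the number
 * of bytes accepted, or 0 if the vector is full or the page would create a
 * queue gap.  Assuming buf/len describe a single-page kmalloc()ed protection
 * buffer, as in bio_integrity_prep() below:
 *
 *	if (bio_integrity_add_page(bio, virt_to_page(buf), len,
 *				   offset_in_page(buf)) < len)
 *		// error: the buffer was not attached
 */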

/**
 * bio_integrity_process - Process integrity metadata for a bio
 * @bio: bio to generate/verify integrity metadata for
 * @proc_iter: iterator to process
 * @proc_fn: Pointer to the relevant processing function
 */
static blk_status_t bio_integrity_process(struct bio *bio,
		struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;
	struct bio_integrity_payload *bip = bio_integrity(bio);
	blk_status_t ret = BLK_STS_OK;

	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = proc_iter->bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);

	__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
		void *kaddr = bvec_kmap_local(&bv);

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		ret = proc_fn(&iter);
		kunmap_local(kaddr);

		if (ret)
			break;
	}
	return ret;
}
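
/*
 * Illustrative sketch of an integrity_processing_fn (not the actual t10-pi
 * implementation): the callback is handed one data segment at a time via
 * iter->data_buf/data_size and emits or checks one tuple per protection
 * interval, advancing prot_buf and seed as it goes.  The 8-byte stride and
 * the use of crc_t10dif() from <linux/crc-t10dif.h> below are hypothetical
 * stand-ins for the profile's real tuple size and checksum:
 *
 *	static blk_status_t demo_generate_fn(struct blk_integrity_iter *iter)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < iter->data_size; i += iter->interval) {
 *			__be16 *guard = iter->prot_buf;
 *
 *			*guard = cpu_to_be16(crc_t10dif(iter->data_buf,
 *							iter->interval));
 *			iter->data_buf += iter->interval;
 *			iter->prot_buf += 8;
 *			iter->seed++;
 *		}
 *		return BLK_STS_OK;
 *	}
 */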

/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio: bio to prepare
 *
 * Description: Checks if the bio already has an integrity payload attached.
 * If it does, the payload has been generated by another kernel subsystem,
 * and we just pass it through. Otherwise allocates integrity payload.
 * The bio must have data direction, target device and start sector set prior
 * to calling. In the WRITE case, integrity metadata will be generated using
 * the block device's integrity function. In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
bool bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	void *buf;
	unsigned long start, end;
	unsigned int len, nr_pages;
	unsigned int bytes, offset, i;
	unsigned int intervals;
	blk_status_t status;

	if (!bi)
		return true;

	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
		return true;

	if (!bio_sectors(bio))
		return true;

	/* Already protected? */
	if (bio_integrity(bio))
		return true;

	if (bio_data_dir(bio) == READ) {
		if (!bi->profile->verify_fn ||
		    !(bi->flags & BLK_INTEGRITY_VERIFY))
			return true;
	} else {
		if (!bi->profile->generate_fn ||
		    !(bi->flags & BLK_INTEGRITY_GENERATE))
			return true;
	}
	intervals = bio_integrity_intervals(bi, bio_sectors(bio));

	/* Allocate kernel buffer for protection data */
	len = intervals * bi->tuple_size;
	buf = kmalloc(len, GFP_NOIO);
	status = BLK_STS_RESOURCE;
	if (unlikely(buf == NULL)) {
		printk(KERN_ERR "could not allocate integrity buffer\n");
		goto err_end_io;
	}

	end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ((unsigned long) buf) >> PAGE_SHIFT;
	nr_pages = end - start;

	/* Allocate bio integrity payload and integrity vectors */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
	if (IS_ERR(bip)) {
		printk(KERN_ERR "could not allocate data integrity bioset\n");
		kfree(buf);
		status = BLK_STS_RESOURCE;
		goto err_end_io;
	}

	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
	bip->bip_iter.bi_size = len;
	bip_set_seed(bip, bio->bi_iter.bi_sector);

	if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
		bip->bip_flags |= BIP_IP_CHECKSUM;

	/* Map it */
	offset = offset_in_page(buf);
	for (i = 0; i < nr_pages; i++) {
		int ret;
		bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		ret = bio_integrity_add_page(bio, virt_to_page(buf),
					     bytes, offset);

		if (ret == 0) {
			printk(KERN_ERR "could not attach integrity payload\n");
			status = BLK_STS_RESOURCE;
			goto err_end_io;
		}

		if (ret < bytes)
			break;

		buf += bytes;
		len -= bytes;
		offset = 0;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE) {
		bio_integrity_process(bio, &bio->bi_iter,
				      bi->profile->generate_fn);
	} else {
		bip->bio_iter = bio->bi_iter;
	}
	return true;

err_end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}
EXPORT_SYMBOL(bio_integrity_prep);
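
/*
 * Usage sketch (illustrative): the submission path is expected to call this
 * before the bio is mapped to a request, roughly:
 *
 *	if (!bio_integrity_prep(bio))
 *		return;
 *
 * A false return means the bio has already been completed with bi_status
 * set, so the caller must not touch it again.
 */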

/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work: Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request. The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	/*
	 * By the time the verify function is called, the bio's iterator
	 * has been advanced during splitting and completion, so process
	 * the copy of its original position saved in bip->bio_iter.
	 */
	bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
					       bi->profile->verify_fn);
	bio_integrity_free(bio);
	bio_endio(bio);
}

/**
 * __bio_integrity_endio - Integrity I/O completion function
 * @bio: Protected bio
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context. However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context. This function postpones completion
 * accordingly.
 */
bool __bio_integrity_endio(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
	    (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
		queue_work(kintegrityd_wq, &bip->bip_work);
		return false;
	}

	bio_integrity_free(bio);
	return true;
}
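
/*
 * Illustrative sketch: bio_endio() reaches this via a small wrapper that
 * only dispatches when an integrity payload is attached, roughly:
 *
 *	if (bio_integrity(bio) && !__bio_integrity_endio(bio))
 *		return;		// completion deferred to kintegrityd
 */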

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio: bio whose integrity vector to update
 * @bytes_done: number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes corresponds to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
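
/*
 * Worked example: with 512-byte protection intervals (interval_exp == 9)
 * and 8-byte tuples, completing bytes_done == 4096 covers 4096 / 512 = 8
 * intervals, so bip_iter.bi_sector advances by 8 and the integrity bvec
 * iterator advances by 8 * 8 = 64 bytes.
 */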

/**
 * bio_integrity_trim - Trim integrity vector
 * @bio: bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
void bio_integrity_trim(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio: New bio
 * @bio_src: Original bio
 * @gfp_mask: Memory allocation mask
 *
 * Description: Called to allocate a bip when cloning a bio
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bip_src->bip_vec,
	       bip_src->bip_vcnt * sizeof(struct bio_vec));

	bip->bip_vcnt = bip_src->bip_vcnt;
	bip->bip_iter = bip_src->bip_iter;
	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;

	return 0;
}
EXPORT_SYMBOL(bio_integrity_clone);
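
/*
 * Note: the clone shares the source's integrity pages (the bvecs are
 * memcpy()ed), so BIP_BLOCK_INTEGRITY is deliberately not propagated;
 * otherwise bio_integrity_free() would kfree() the shared protection
 * buffer for both bios.
 */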

int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	if (mempool_initialized(&bs->bio_integrity_pool))
		return 0;

	if (mempool_init_slab_pool(&bs->bio_integrity_pool,
				   pool_size, bip_slab))
		return -1;

	if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
		mempool_exit(&bs->bio_integrity_pool);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);
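
/*
 * Usage sketch (illustrative, hypothetical bio_set and pool sizes): a user
 * that allocates integrity-capable bios from its own bio_set creates the
 * pools once, e.g. at init time:
 *
 *	static struct bio_set demo_bs;
 *
 *	if (bioset_init(&demo_bs, 64, 0, BIOSET_NEED_BVECS))
 *		goto fail;
 *	if (bioset_integrity_create(&demo_bs, 64)) {
 *		bioset_exit(&demo_bs);
 *		goto fail;
 *	}
 */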

void bioset_integrity_free(struct bio_set *bs)
{
	mempool_exit(&bs->bio_integrity_pool);
	mempool_exit(&bs->bvec_integrity_pool);
}

void __init bio_integrity_init(void)
{
	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIO_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}