/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
 *
 * This file is released under the GPLv2.
 *
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * the default prefetch value. Data are read in "prefetch_cluster" chunks from
 * the hash device. Setting this greatly improves performance when data and
 * hash are on the same disk on different partitions on devices with poor
 * random access behavior.
 */

#include "dm-verity.h"
#include "dm-verity-fec.h"

#include <linux/module.h>
#include <linux/reboot.h>

#define DM_MSG_PREFIX			"verity"

#define DM_VERITY_ENV_LENGTH		42
#define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"

#define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144

#define DM_VERITY_MAX_CORRUPTED_ERRS	100

#define DM_VERITY_OPT_LOGGING		"ignore_corruption"
#define DM_VERITY_OPT_RESTART		"restart_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"

#define DM_VERITY_OPTS_MAX		(2 + DM_VERITY_OPTS_FEC)

static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;

module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
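
/*
 * Example (illustrative value only): the prefetch size can be raised to
 * 1 MiB at runtime with:
 *
 *	echo 1048576 > /sys/module/dm_verity/parameters/prefetch_cluster
 */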

struct dm_verity_prefetch_work {
	struct work_struct work;
	struct dm_verity *v;
	sector_t block;
	unsigned n_blocks;
};

/*
 * Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, hash of the block has been verified.
 *
 * The variable hash_verified is set to 0 when allocating the buffer, then
 * it can be changed to 1 and it is never reset to 0 again.
 *
 * There is no lock around this value, a race condition can at worst cause
 * that multiple processes verify the hash of the same buffer simultaneously
 * and write 1 to hash_verified simultaneously.
 * This condition is harmless, so we don't need locking.
 */
struct buffer_aux {
	int hash_verified;
};

/*
 * Initialize struct buffer_aux for a freshly created buffer.
 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->hash_verified = 0;
}

/*
 * Translate input sector number to the sector number on the target device.
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	return v->data_start + dm_target_offset(v->ti, bi_sector);
}

/*
 * Return hash position of a specified block at a specified tree level
 * (0 is the lowest level).
 * The lowest "hash_per_block_bits"-bits of the result denote hash position
 * inside a hash block. The remaining bits denote location of the hash block.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	return block >> (level * v->hash_per_block_bits);
}

/*
 * Wrapper for crypto_shash_init, which handles verity salting.
 */
static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc)
{
	int r;

	desc->tfm = v->tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);

	if (unlikely(r < 0)) {
		DMERR("crypto_shash_init failed: %d", r);
		return r;
	}

	if (likely(v->version >= 1)) {
		r = crypto_shash_update(desc, v->salt, v->salt_size);

		if (unlikely(r < 0)) {
			DMERR("crypto_shash_update failed: %d", r);
			return r;
		}
	}

	return 0;
}

static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc,
			      const u8 *data, size_t len)
{
	int r = crypto_shash_update(desc, data, len);

	if (unlikely(r < 0))
		DMERR("crypto_shash_update failed: %d", r);

	return r;
}

static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc,
			     u8 *digest)
{
	int r;

	if (unlikely(!v->version)) {
		r = crypto_shash_update(desc, v->salt, v->salt_size);

		if (r < 0) {
			DMERR("crypto_shash_update failed: %d", r);
			return r;
		}
	}

	r = crypto_shash_final(desc, digest);

	if (unlikely(r < 0))
		DMERR("crypto_shash_final failed: %d", r);

	return r;
}
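
/*
 * Note on salting: for format version 1 and later the salt is hashed before
 * the block data (see verity_hash_init() above); for the original version 0
 * format it is hashed after the data, in verity_hash_final().
 */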

int verity_hash(struct dm_verity *v, struct shash_desc *desc,
		const u8 *data, size_t len, u8 *digest)
{
	int r;

	r = verity_hash_init(v, desc);
	if (unlikely(r < 0))
		return r;

	r = verity_hash_update(v, desc, data, len);
	if (unlikely(r < 0))
		return r;

	return verity_hash_final(v, desc, digest);
}

static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
				 sector_t *hash_block, unsigned *offset)
{
	sector_t position = verity_position_at_level(v, block, level);
	unsigned idx;

	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);

	if (!offset)
		return;

	idx = position & ((1 << v->hash_per_block_bits) - 1);
	if (!v->version)
		*offset = idx * v->digest_size;
	else
		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
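
/*
 * Worked example for verity_position_at_level() and verity_hash_at_level()
 * (illustrative numbers, assuming 4096-byte hash blocks and a 32-byte digest
 * such as sha256): hash_per_block_bits is __fls(4096 / 32) = 7, i.e. 128
 * hashes per hash block.  For data block 1000 at level 0,
 * verity_position_at_level() returns 1000, so the wanted hash sits in block
 * v->hash_level_block[0] + (1000 >> 7) = v->hash_level_block[0] + 7, at byte
 * offset (1000 & 127) * 32 = 3328 for format version 1.
 */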

/*
 * Handle verification errors.
 */
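/*
 * On corruption a KOBJ_CHANGE uevent is also sent to user space with the
 * payload DM_VERITY_ERR_BLOCK_NR=<block type>,<block number>, e.g.
 * "DM_VERITY_ERR_BLOCK_NR=0,12345" for a corrupted data block (block number
 * illustrative, and assuming DM_VERITY_BLOCK_TYPE_DATA has the value 0).
 */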
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
			     unsigned long long block)
{
	char verity_env[DM_VERITY_ENV_LENGTH];
	char *envp[] = { verity_env, NULL };
	const char *type_str = "";
	struct mapped_device *md = dm_table_get_md(v->ti->table);

	/* Corruption should be visible in device status in all modes */
	v->hash_failed = 1;

	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
		goto out;

	v->corrupted_errs++;

	switch (type) {
	case DM_VERITY_BLOCK_TYPE_DATA:
		type_str = "data";
		break;
	case DM_VERITY_BLOCK_TYPE_METADATA:
		type_str = "metadata";
		break;
	default:
		BUG();
	}

	DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
		block);

	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
		DMERR("%s: reached maximum errors", v->data_dev->name);

	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
		DM_VERITY_ENV_VAR_NAME, type, block);

	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);

out:
	if (v->mode == DM_VERITY_MODE_LOGGING)
		return 0;

	if (v->mode == DM_VERITY_MODE_RESTART)
		kernel_restart("dm-verity device corrupted");

	return 1;
}

/*
 * Verify hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return, verity_io_want_digest(v, io) contains the hash value
 * for a lower tree level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, an unverified buffer is skipped and 1 is
 * returned. If "skip_unverified" is false, an unverified buffer is hashed and
 * verified against the current value of verity_io_want_digest(v, io).
 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
			       sector_t block, int level, bool skip_unverified,
			       u8 *want_digest)
{
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned offset;

	verity_hash_at_level(v, block, level, &hash_block, &offset);

	data = dm_bufio_read(v->bufio, hash_block, &buf);
	if (IS_ERR(data))
		return PTR_ERR(data);

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		r = verity_hash(v, verity_io_hash_desc(v, io),
				data, 1 << v->hash_dev_block_bits,
				verity_io_real_digest(v, io));
		if (unlikely(r < 0))
			goto release_ret_r;

		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
				  v->digest_size) == 0))
			aux->hash_verified = 1;
		else if (verity_fec_decode(v, io,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block, data, NULL) == 0)
			aux->hash_verified = 1;
		else if (verity_handle_err(v,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block)) {
			r = -EIO;
			goto release_ret_r;
		}
	}

	data += offset;
	memcpy(want_digest, data, v->digest_size);
	r = 0;

release_ret_r:
	dm_bufio_release(buf);
	return r;
}

/*
 * Find a hash for a given block, write it to digest and verify the integrity
 * of the hash tree if necessary.
 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
			  sector_t block, u8 *digest, bool *is_zero)
{
	int r = 0, i;

	if (likely(v->levels)) {
		/*
		 * First, we try to get the requested hash for
		 * the current block. If the hash block itself is
		 * verified, zero is returned. If it isn't, this
		 * function returns 1 and we fall back to whole
		 * chain verification.
		 */
		r = verity_verify_level(v, io, block, 0, true, digest);
		if (likely(r <= 0))
			goto out;
	}

	memcpy(digest, v->root_digest, v->digest_size);

	for (i = v->levels - 1; i >= 0; i--) {
		r = verity_verify_level(v, io, block, i, false, digest);
		if (unlikely(r))
			goto out;
	}
out:
	if (!r && v->zero_digest)
		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
	else
		*is_zero = false;

	return r;
}

/*
 * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
 * starting from iter.
 */
int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
			struct bvec_iter *iter,
			int (*process)(struct dm_verity *v,
				       struct dm_verity_io *io, u8 *data,
				       size_t len))
{
	unsigned todo = 1 << v->data_dev_block_bits;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);

	do {
		int r;
		u8 *page;
		unsigned len;
		struct bio_vec bv = bio_iter_iovec(bio, *iter);

		page = kmap_atomic(bv.bv_page);
		len = bv.bv_len;

		if (likely(len >= todo))
			len = todo;

		r = process(v, io, page + bv.bv_offset, len);
		kunmap_atomic(page);

		if (r < 0)
			return r;

		bio_advance_iter(bio, iter, len);
		todo -= len;
	} while (todo);

	return 0;
}

static int verity_bv_hash_update(struct dm_verity *v, struct dm_verity_io *io,
				 u8 *data, size_t len)
{
	return verity_hash_update(v, verity_io_hash_desc(v, io), data, len);
}

static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
			  u8 *data, size_t len)
{
	memset(data, 0, len);
	return 0;
}

/*
 * Verify one "dm_verity_io" structure. For each data block: look up the
 * expected hash (verity_hash_for_block), hash the data from the bio and, on
 * mismatch, try forward error correction before reporting the block as
 * corrupted.
 */
static int verity_verify_io(struct dm_verity_io *io)
{
	bool is_zero;
	struct dm_verity *v = io->v;
	struct bvec_iter start;
	unsigned b;

	for (b = 0; b < io->n_blocks; b++) {
		int r;
		struct shash_desc *desc = verity_io_hash_desc(v, io);

		r = verity_hash_for_block(v, io, io->block + b,
					  verity_io_want_digest(v, io),
					  &is_zero);
		if (unlikely(r < 0))
			return r;

		if (is_zero) {
			/*
			 * If we expect a zero block, don't validate, just
			 * return zeros.
			 */
			r = verity_for_bv_block(v, io, &io->iter,
						verity_bv_zero);
			if (unlikely(r < 0))
				return r;

			continue;
		}

		r = verity_hash_init(v, desc);
		if (unlikely(r < 0))
			return r;

		start = io->iter;
		r = verity_for_bv_block(v, io, &io->iter, verity_bv_hash_update);
		if (unlikely(r < 0))
			return r;

		r = verity_hash_final(v, desc, verity_io_real_digest(v, io));
		if (unlikely(r < 0))
			return r;

		if (likely(memcmp(verity_io_real_digest(v, io),
				  verity_io_want_digest(v, io), v->digest_size) == 0))
			continue;
		else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
					   io->block + b, NULL, &start) == 0)
			continue;
		else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
					   io->block + b))
			return -EIO;
	}

	return 0;
}

/*
 * End one "io" structure with a given error.
 */
static void verity_finish_io(struct dm_verity_io *io, int error)
{
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);

	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_private = io->orig_bi_private;

	verity_fec_finish_io(io);

	bio_endio_nodec(bio, error);
}

static void verity_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);

	verity_finish_io(io, verity_verify_io(io));
}

static void verity_end_io(struct bio *bio, int error)
{
	struct dm_verity_io *io = bio->bi_private;

	if (error && !verity_fec_is_enabled(io->v)) {
		verity_finish_io(io, error);
		return;
	}

	INIT_WORK(&io->work, verity_work);
	queue_work(io->v->verify_wq, &io->work);
}

/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched, it is assumed that it will be cached
 * all the time.
 */
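/*
 * Example of the level-0 clustering below (illustrative, assuming the default
 * prefetch_cluster of 262144 bytes and 4096-byte data blocks): the cluster is
 * 262144 >> 12 = 64 blocks, so hash_block_start is rounded down and
 * hash_block_end rounded up to a 64-block aligned window before the prefetch
 * is issued.
 */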
static void verity_prefetch_io(struct work_struct *work)
{
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	int i;
	sector_t prefetch_size;

	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;
		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
		if (!i) {
			unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;

			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << __fls(cluster);

			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_blocks))
				hash_block_end = v->hash_blocks - 1;
		}
no_prefetch_cluster:
		/* For eMMC it is more efficient to send one bigger read. */
		prefetch_size = max((sector_t)CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE,
			hash_block_end - hash_block_start + 1);
		if ((hash_block_start + prefetch_size) >= (v->hash_start + v->hash_blocks)) {
			prefetch_size = hash_block_end - hash_block_start + 1;
		}
		dm_bufio_prefetch(v->bufio, hash_block_start,
				  prefetch_size);
	}

	kfree(pw);
}

static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
	struct dm_verity_prefetch_work *pw;

	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
		GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

	if (!pw)
		return;

	INIT_WORK(&pw->work, verity_prefetch_io);
	pw->v = v;
	pw->block = io->block;
	pw->n_blocks = io->n_blocks;
	queue_work(v->verify_wq, &pw->work);
}

/*
 * Bio map function. It allocates dm_verity_io structure and bio vector and
 * fills them. Then it issues prefetches and the I/O.
 */
int verity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	bio->bi_bdev = v->data_dev->bdev;
	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);

	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
		return -EIO;
	}

	if (bio_end_sector(bio) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return -EIO;
	}

	if (bio_data_dir(bio) == WRITE)
		return -EIO;

	io = dm_per_bio_data(bio, ti->per_bio_data_size);
	io->v = v;
	io->orig_bi_end_io = bio->bi_end_io;
	io->orig_bi_private = bio->bi_private;
	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;

	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->iter = bio->bi_iter;

	verity_fec_init_io(io);

	verity_submit_prefetch(v, io);

	generic_make_request(bio);

	return DM_MAPIO_SUBMITTED;
}
EXPORT_SYMBOL_GPL(verity_map);

/*
 * Status: V (valid) or C (corruption found)
 */
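/*
 * Illustrative "dmsetup status" line for a healthy 2 GiB target (device name
 * and length made up): "0 4194304 verity V".  The STATUSTYPE_TABLE output
 * mirrors the constructor arguments documented before verity_ctr() below,
 * followed by any optional feature arguments in effect.
 */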
void verity_status(struct dm_target *ti, status_type_t type,
		   unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_verity *v = ti->private;
	unsigned args = 0;
	unsigned sz = 0;
	unsigned x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
		       v->version,
		       v->data_dev->name,
		       v->hash_dev->name,
		       1 << v->data_dev_block_bits,
		       1 << v->hash_dev_block_bits,
		       (unsigned long long)v->data_blocks,
		       (unsigned long long)v->hash_start,
		       v->alg_name
		       );
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		DMEMIT(" ");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);
		if (v->mode != DM_VERITY_MODE_EIO)
			args++;
		if (verity_fec_is_enabled(v))
			args += DM_VERITY_OPTS_FEC;
		if (v->zero_digest)
			args++;
		if (!args)
			return;
		DMEMIT(" %u", args);
		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			default:
				BUG();
			}
		}
		if (v->zero_digest)
			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
		sz = verity_fec_status_table(v, sz, result, maxlen);
		break;
	}
}
EXPORT_SYMBOL_GPL(verity_status);

int verity_ioctl(struct dm_target *ti, unsigned cmd,
		 unsigned long arg)
{
	struct dm_verity *v = ti->private;
	int r = 0;

	if (v->data_start ||
	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
					   cmd, arg);
}
EXPORT_SYMBOL_GPL(verity_ioctl);

int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		 struct bio_vec *biovec, int max_size)
{
	struct dm_verity *v = ti->private;
	struct request_queue *q = bdev_get_queue(v->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = v->data_dev->bdev;
	bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
EXPORT_SYMBOL_GPL(verity_merge);

int verity_iterate_devices(struct dm_target *ti,
			   iterate_devices_callout_fn fn, void *data)
{
	struct dm_verity *v = ti->private;

	return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
EXPORT_SYMBOL_GPL(verity_iterate_devices);

void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_verity *v = ti->private;

	if (limits->logical_block_size < 1 << v->data_dev_block_bits)
		limits->logical_block_size = 1 << v->data_dev_block_bits;

	if (limits->physical_block_size < 1 << v->data_dev_block_bits)
		limits->physical_block_size = 1 << v->data_dev_block_bits;

	blk_limits_io_min(limits, limits->logical_block_size);
}
EXPORT_SYMBOL_GPL(verity_io_hints);

void verity_dtr(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	if (v->verify_wq)
		destroy_workqueue(v->verify_wq);

	if (v->bufio)
		dm_bufio_client_destroy(v->bufio);

	kfree(v->salt);
	kfree(v->root_digest);
	kfree(v->zero_digest);

	if (v->tfm)
		crypto_free_shash(v->tfm);

	kfree(v->alg_name);

	if (v->hash_dev)
		dm_put_device(ti, v->hash_dev);

	if (v->data_dev)
		dm_put_device(ti, v->data_dev);

	verity_fec_dtr(v);

	kfree(v);
}
EXPORT_SYMBOL_GPL(verity_dtr);

static int verity_alloc_zero_digest(struct dm_verity *v)
{
	int r = -ENOMEM;
	struct shash_desc *desc;
	u8 *zero_data;

	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);

	if (!v->zero_digest)
		return r;

	desc = kmalloc(v->shash_descsize, GFP_KERNEL);

	if (!desc)
		return r; /* verity_dtr will free zero_digest */

	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);

	if (!zero_data)
		goto out;

	r = verity_hash(v, desc, zero_data, 1 << v->data_dev_block_bits,
			v->zero_digest);

out:
	kfree(desc);
	kfree(zero_data);

	return r;
}

static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
{
	int r;
	unsigned argc;
	struct dm_target *ti = v->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
			v->mode = DM_VERITY_MODE_LOGGING;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
			v->mode = DM_VERITY_MODE_RESTART;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
			r = verity_alloc_zero_digest(v);
			if (r) {
				ti->error = "Cannot allocate zero digest";
				return r;
			}
			continue;

		} else if (verity_is_fec_opt_arg(arg_name)) {
			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
			if (r)
				return r;
			continue;
		}

		ti->error = "Unrecognized verity feature request";
		return -EINVAL;
	} while (argc && !r);

	return r;
}

/*
 * Target parameters:
 *	<version>	The current format is version 1.
 *			Vsn 0 is compatible with original Chromium OS releases.
 *	<data device>
 *	<hash device>
 *	<data block size>
 *	<hash block size>
 *	<the number of data blocks>
 *	<hash start block>
 *	<algorithm>
 *	<digest>
 *	<salt>		Hex string or "-" if no salt.
 */
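/*
 * Example table line (illustrative devices, sizes, digest and salt; the
 * digest and salt must match the hash tree generated for the data device):
 *
 *	0 4194304 verity 1 /dev/sda1 /dev/sda2 4096 4096 524288 1 sha256 \
 *		<root digest in hex> <salt in hex>
 */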
int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_verity *v;
	struct dm_arg_set as;
	unsigned int num;
	unsigned long long num_ll;
	int r;
	int i;
	sector_t hash_position;
	char dummy;

	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
	if (!v) {
		ti->error = "Cannot allocate verity structure";
		return -ENOMEM;
	}
	ti->private = v;
	v->ti = ti;

	r = verity_fec_ctr_alloc(v);
	if (r)
		goto bad;

	if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
		ti->error = "Device must be readonly";
		r = -EINVAL;
		goto bad;
	}

	if (argc < 10) {
		ti->error = "Not enough arguments";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
	    num > 1) {
		ti->error = "Invalid version";
		r = -EINVAL;
		goto bad;
	}
	v->version = num;

	r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		goto bad;
	}

	r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
	if (r) {
		ti->error = "Hash device lookup failed";
		goto bad;
	}

	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->data_dev->bdev) ||
	    num > PAGE_SIZE) {
		ti->error = "Invalid data device block size";
		r = -EINVAL;
		goto bad;
	}
	v->data_dev_block_bits = __ffs(num);

	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
	    num > INT_MAX) {
		ti->error = "Invalid hash device block size";
		r = -EINVAL;
		goto bad;
	}
	v->hash_dev_block_bits = __ffs(num);

	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid data blocks";
		r = -EINVAL;
		goto bad;
	}
	v->data_blocks = num_ll;

	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid hash start";
		r = -EINVAL;
		goto bad;
	}
	v->hash_start = num_ll;

	v->alg_name = kstrdup(argv[7], GFP_KERNEL);
	if (!v->alg_name) {
		ti->error = "Cannot allocate algorithm name";
		r = -ENOMEM;
		goto bad;
	}

	v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
	if (IS_ERR(v->tfm)) {
		ti->error = "Cannot initialize hash function";
		r = PTR_ERR(v->tfm);
		v->tfm = NULL;
		goto bad;
	}
	v->digest_size = crypto_shash_digestsize(v->tfm);
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		r = -EINVAL;
		goto bad;
	}
	v->shash_descsize =
		sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);

	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
		r = -ENOMEM;
		goto bad;
	}
	if (strlen(argv[8]) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
		ti->error = "Invalid root digest";
		r = -EINVAL;
		goto bad;
	}

	if (strcmp(argv[9], "-")) {
		v->salt_size = strlen(argv[9]) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
		if (!v->salt) {
			ti->error = "Cannot allocate salt";
			r = -ENOMEM;
			goto bad;
		}
		if (strlen(argv[9]) != v->salt_size * 2 ||
		    hex2bin(v->salt, argv[9], v->salt_size)) {
			ti->error = "Invalid salt";
			r = -EINVAL;
			goto bad;
		}
	}

	argv += 10;
	argc -= 10;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		r = verity_parse_opt_args(&as, v);
		if (r < 0)
			goto bad;
	}

	v->hash_per_block_bits =
		__fls((1 << v->hash_dev_block_bits) / v->digest_size);

	v->levels = 0;
	if (v->data_blocks)
		while (v->hash_per_block_bits * v->levels < 64 &&
		       (unsigned long long)(v->data_blocks - 1) >>
		       (v->hash_per_block_bits * v->levels))
			v->levels++;

	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
		r = -E2BIG;
		goto bad;
	}

	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		sector_t s;
		v->hash_level_block[i] = hash_position;
		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
			>> ((i + 1) * v->hash_per_block_bits);
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
			r = -E2BIG;
			goto bad;
		}
		hash_position += s;
	}
	v->hash_blocks = hash_position;

	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
		v->bufio = NULL;
		goto bad;
	}

	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
	v->verify_wq = alloc_workqueue("kverityd",
				       WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND,
				       num_online_cpus());
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * Per-bio data holds the dm_verity_io struct, the hash descriptor and
	 * room for two digests (the computed and the expected one).
	 */
	ti->per_bio_data_size = sizeof(struct dm_verity_io) +
		v->shash_descsize + v->digest_size * 2;

	r = verity_fec_ctr(v);
	if (r)
		goto bad;

	ti->per_bio_data_size = roundup(ti->per_bio_data_size,
					__alignof__(struct dm_verity_io));

	return 0;

bad:
	verity_dtr(ti);

	return r;
}
EXPORT_SYMBOL_GPL(verity_ctr);

static struct target_type verity_target = {
	.name		= "verity",
	.version	= {1, 3, 0},
	.module		= THIS_MODULE,
	.ctr		= verity_ctr,
	.dtr		= verity_dtr,
	.map		= verity_map,
	.status		= verity_status,
	.ioctl		= verity_ioctl,
	.merge		= verity_merge,
	.iterate_devices = verity_iterate_devices,
	.io_hints	= verity_io_hints,
};

static int __init dm_verity_init(void)
{
	int r;

	r = dm_register_target(&verity_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_verity_exit(void)
{
	dm_unregister_target(&verity_target);
}

module_init(dm_verity_init);
module_exit(dm_verity_exit);

MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");