/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctype.h>
#include <stdlib.h>

#include <algorithm>
#include <string>
#include <vector>

#include <android-base/strings.h>
#include <openssl/evp.h>

#include "fec_private.h"

/* converts a hex nibble into an int */
static inline int hextobin(char c)
{
    if (c >= '0' && c <= '9') {
        return c - '0';
    } else if (c >= 'a' && c <= 'f') {
        return c - 'a' + 10;
    } else {
        errno = EINVAL;
        return -1;
    }
}

/* converts the hex string `src' into `size' bytes of binary data and copies
   the result into `dst'; `src' must contain exactly 2 * `size' characters */
static int parse_hex(uint8_t *dst, uint32_t size, const char *src)
{
    int l, h;

    check(dst);
    check(src);
    check(2 * size == strlen(src));

    while (size) {
        h = hextobin(tolower(*src++));
        l = hextobin(tolower(*src++));

        check(l >= 0);
        check(h >= 0);

        *dst++ = (h << 4) | l;
        --size;
    }

    return 0;
}

/* parses a 64-bit unsigned integer from string `src' into `dst' and, if
   `maxval' is >0, checks that `dst' <= `maxval' */
static int parse_uint64(const char *src, uint64_t maxval, uint64_t *dst)
{
    char *end;
    unsigned long long int value;

    check(src);
    check(dst);

    errno = 0;
    value = strtoull(src, &end, 0);

    if (*src == '\0' || *end != '\0' ||
            (errno == ERANGE && value == ULLONG_MAX)) {
        errno = EINVAL;
        return -1;
    }

    if (maxval && value > maxval) {
        errno = EINVAL;
        return -1;
    }

    *dst = (uint64_t)value;
    return 0;
}

/* computes the size of the verity hash tree for `file_size' bytes and returns
   the number of hash tree levels in `verity_levels', and the number of hashes
   per level in `level_hashes', if the parameters are non-NULL */
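/* for example, with a 4 KiB FEC_BLOCKSIZE and a 32-byte padded digest, a
   1 GiB image has 262144 data blocks: their hashes fill 2048 blocks, the
   hashes of those blocks fill 16 blocks, and the hashes of those fill 1 block,
   so the tree has 3 levels and occupies (2048 + 16 + 1) * 4096 bytes */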
uint64_t verity_get_size(uint64_t file_size, uint32_t *verity_levels,
                         uint32_t *level_hashes, uint32_t padded_digest_size) {
    // we assume a known metadata size, 4 KiB block size, and SHA-256 or SHA-1
    // to avoid relying on disk content.

    uint32_t level = 0;
    uint64_t total = 0;
    uint64_t hashes = file_size / FEC_BLOCKSIZE;

    do {
        if (level_hashes) {
            level_hashes[level] = hashes;
        }

        hashes = fec_div_round_up(hashes * padded_digest_size, FEC_BLOCKSIZE);
        total += hashes;

        ++level;
    } while (hashes > 1);

    if (verity_levels) {
        *verity_levels = level;
    }

    return total * FEC_BLOCKSIZE;
}

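/* computes the salted hash of a single FEC_BLOCKSIZE block using the digest
   selected by `nid_' and writes `digest_length_' bytes to `hash' */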
int hashtree_info::get_hash(const uint8_t *block, uint8_t *hash) {
    auto md = EVP_get_digestbynid(nid_);
    check(md);
    auto mdctx = EVP_MD_CTX_new();
    check(mdctx);

    EVP_DigestInit_ex(mdctx, md, nullptr);
    EVP_DigestUpdate(mdctx, salt.data(), salt.size());
    EVP_DigestUpdate(mdctx, block, FEC_BLOCKSIZE);
    unsigned int hash_size;
    EVP_DigestFinal_ex(mdctx, hash, &hash_size);
    EVP_MD_CTX_free(mdctx);

    check(hash_size == digest_length_);
    return 0;
}

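/* records the hash tree geometry and salt, and derives the digest sizes from
   `nid', which must be either NID_sha256 or NID_sha1 */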
int hashtree_info::initialize(uint64_t hash_start, uint64_t data_blocks,
                              const std::vector<uint8_t> &salt, int nid) {
    check(nid == NID_sha256 || nid == NID_sha1);

    this->hash_start = hash_start;
    this->data_blocks = data_blocks;
    this->salt = salt;
    this->nid_ = nid;

    digest_length_ = nid == NID_sha1 ? SHA_DIGEST_LENGTH : SHA256_DIGEST_LENGTH;
    // Digests are stored in slots padded to SHA256_DIGEST_LENGTH (32 bytes)
    // for both SHA-256 and SHA-1.
    padded_digest_length_ = SHA256_DIGEST_LENGTH;

    return 0;
}

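/* returns true if the salted hash of `block' matches the `expected' digest */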
bool hashtree_info::check_block_hash(const uint8_t *expected,
                                     const uint8_t *block) {
    check(block);
    std::vector<uint8_t> hash(digest_length_, 0);

    if (unlikely(get_hash(block, hash.data()) == -1)) {
        error("failed to hash");
        return false;
    }

    check(expected);
    return !memcmp(expected, hash.data(), digest_length_);
}

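/* checks the data block at `index' against its digest in `hash_data', the
   in-memory copy of the lowest hash tree level */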
bool hashtree_info::check_block_hash_with_index(uint64_t index,
                                                const uint8_t *block) {
    check(index < data_blocks);

    const uint8_t *expected = &hash_data[index * padded_digest_length_];
    return check_block_hash(expected, block);
}

// Reads the hash and the corresponding data block using error correction, if
// available.
bool hashtree_info::ecc_read_hashes(fec_handle *f, uint64_t hash_offset,
                                    uint8_t *hash, uint64_t data_offset,
                                    uint8_t *data) {
    check(f);

    if (hash &&
        fec_pread(f, hash, digest_length_, hash_offset) != digest_length_) {
        error("failed to read hash tree: offset %" PRIu64 ": %s", hash_offset,
              strerror(errno));
        return false;
    }

    check(data);

    if (fec_pread(f, data, FEC_BLOCKSIZE, data_offset) != FEC_BLOCKSIZE) {
        error("failed to read hash tree: data_offset %" PRIu64 ": %s",
              data_offset, strerror(errno));
        return false;
    }

    return true;
}

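/* verifies the hash tree against the trusted root digest `root': validates
   the root block, walks each level of the tree (using error corrected reads
   when the raw data does not validate, and writing corrections back in r/w
   mode), caches the lowest level in `hash_data', and precomputes the salted
   hash of an all-zero block in `zero_hash' */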
int hashtree_info::verify_tree(const fec_handle *f, const uint8_t *root) {
    check(f);
    check(root);

    uint8_t data[FEC_BLOCKSIZE];

    uint32_t levels = 0;

    /* calculate the size and the number of levels in the hash tree */
    uint64_t hash_size = verity_get_size(data_blocks * FEC_BLOCKSIZE, &levels,
                                         NULL, padded_digest_length_);

    check(hash_start < UINT64_MAX - hash_size);
    check(hash_start + hash_size <= f->data_size);

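    /* the tree is stored top-down starting at `hash_start': the first block
       hashes to `root', and each level holds the hashes of the blocks in the
       level below it, ending with the blocks that hash the data blocks */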
    uint64_t hash_offset = hash_start;
    uint64_t data_offset = hash_offset + FEC_BLOCKSIZE;

    /* validate the root hash */
    if (!raw_pread(f->fd, data, FEC_BLOCKSIZE, hash_offset) ||
        !check_block_hash(root, data)) {
        /* try to correct */
        if (!ecc_read_hashes(const_cast<fec_handle *>(f), 0, nullptr,
                             hash_offset, data) ||
            !check_block_hash(root, data)) {
            error("root hash invalid");
            return -1;
        } else if (f->mode & O_RDWR &&
                   !raw_pwrite(f->fd, data, FEC_BLOCKSIZE, hash_offset)) {
            error("failed to rewrite the root block: %s", strerror(errno));
            return -1;
        }
    }

    debug("root hash valid");

    /* calculate the number of hashes on each level */
    uint32_t hashes[levels];

    verity_get_size(data_blocks * FEC_BLOCKSIZE, NULL, hashes,
                    padded_digest_length_);

    uint64_t hash_data_offset = data_offset;
    uint32_t hash_data_blocks = 0;
    /* calculate the size and offset for the data hashes */
    for (uint32_t i = 1; i < levels; ++i) {
        uint32_t blocks = hashes[levels - i];
        debug("%u hash blocks on level %u", blocks, levels - i);

        hash_data_offset = data_offset;
        hash_data_blocks = blocks;

        data_offset += blocks * FEC_BLOCKSIZE;
    }

    check(hash_data_blocks);
    check(hash_data_blocks <= hash_size / FEC_BLOCKSIZE);

    check(hash_data_offset);
    check(hash_data_offset <= UINT64_MAX - (hash_data_blocks * FEC_BLOCKSIZE));
    check(hash_data_offset < f->data_size);
    check(hash_data_offset + hash_data_blocks * FEC_BLOCKSIZE <= f->data_size);

    /* copy data hashes to memory in case they are corrupted, so we don't
       have to correct them every time they are needed */
    std::vector<uint8_t> data_hashes(hash_data_blocks * FEC_BLOCKSIZE, 0);

    /* validate the rest of the hash tree */
    data_offset = hash_offset + FEC_BLOCKSIZE;

    std::vector<uint8_t> buffer(padded_digest_length_, 0);
    for (uint32_t i = 1; i < levels; ++i) {
        uint32_t blocks = hashes[levels - i];

        for (uint32_t j = 0; j < blocks; ++j) {
            /* ecc reads are very I/O intensive, so read the raw hash tree and
               do error correction only if it doesn't validate */
            if (!raw_pread(f->fd, buffer.data(), padded_digest_length_,
                           hash_offset + j * padded_digest_length_) ||
                !raw_pread(f->fd, data, FEC_BLOCKSIZE,
                           data_offset + j * FEC_BLOCKSIZE)) {
                error("failed to read hashes: %s", strerror(errno));
                return -1;
            }

            if (!check_block_hash(buffer.data(), data)) {
                /* try to correct */
                if (!ecc_read_hashes(const_cast<fec_handle *>(f),
                                     hash_offset + j * padded_digest_length_,
                                     buffer.data(),
                                     data_offset + j * FEC_BLOCKSIZE, data) ||
                    !check_block_hash(buffer.data(), data)) {
                    error("invalid hash tree: hash_offset %" PRIu64
                          ", data_offset %" PRIu64 ", block %u",
                          hash_offset, data_offset, j);
                    return -1;
                }

                /* update the corrected blocks to the file if we are in r/w
                   mode */
                if (f->mode & O_RDWR) {
                    if (!raw_pwrite(f->fd, buffer.data(), padded_digest_length_,
                                    hash_offset + j * padded_digest_length_) ||
                        !raw_pwrite(f->fd, data, FEC_BLOCKSIZE,
                                    data_offset + j * FEC_BLOCKSIZE)) {
                        error("failed to write hashes: %s", strerror(errno));
                        return -1;
                    }
                }
            }

            if (blocks == hash_data_blocks) {
                std::copy(data, data + FEC_BLOCKSIZE,
                          data_hashes.begin() + j * FEC_BLOCKSIZE);
            }
        }

        hash_offset = data_offset;
        data_offset += blocks * FEC_BLOCKSIZE;
    }

    debug("valid");

    this->hash_data = std::move(data_hashes);

    std::vector<uint8_t> zero_block(FEC_BLOCKSIZE, 0);
    zero_hash.resize(padded_digest_length_, 0);
    if (get_hash(zero_block.data(), zero_hash.data()) == -1) {
        error("failed to hash");
        return -1;
    }
    return 0;
}

/* reads, corrects and parses the verity table, validates parameters, and if
   `f->flags' does not have `FEC_VERITY_DISABLE' set, calls `verify_tree' to
   load and validate the hash tree */
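/* the table is a single line of space-separated dm-verity fields; the switch
   below relies only on the field positions, which in a typical table are:
       0: version          1: data device      2: hash device
       3: data block size  4: hash block size  5: number of data blocks
       6: hash start block 7: hash algorithm   8: root digest      9: salt */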
static int parse_table(fec_handle *f, uint64_t offset, uint32_t size, bool useecc)
{
    check(f);
    check(size >= VERITY_MIN_TABLE_SIZE);
    check(size <= VERITY_MAX_TABLE_SIZE);

    debug("offset = %" PRIu64 ", size = %u", offset, size);

    std::string table(size, 0);

    if (!useecc) {
        if (!raw_pread(f->fd, const_cast<char *>(table.data()), size, offset)) {
            error("failed to read verity table: %s", strerror(errno));
            return -1;
        }
    } else if (fec_pread(f, const_cast<char *>(table.data()), size, offset) !=
               (ssize_t)size) {
        error("failed to ecc read verity table: %s", strerror(errno));
        return -1;
    }

    debug("verity table: '%s'", table.c_str());

    int i = 0;
    std::vector<uint8_t> salt;
    uint8_t root[SHA256_DIGEST_LENGTH];
    uint64_t hash_start = 0;
    uint64_t data_blocks = 0;

    auto tokens = android::base::Split(table, " ");

    for (const auto& token : tokens) {
        switch (i++) {
        case 0: /* version */
            if (token != stringify(VERITY_TABLE_VERSION)) {
                error("unsupported verity table version: %s", token.c_str());
                return -1;
            }
            break;
        case 3: /* data_block_size */
        case 4: /* hash_block_size */
            /* assume 4 KiB block sizes for everything */
            if (token != stringify(FEC_BLOCKSIZE)) {
                error("unsupported verity block size: %s", token.c_str());
                return -1;
            }
            break;
        case 5: /* num_data_blocks */
            if (parse_uint64(token.c_str(), f->data_size / FEC_BLOCKSIZE,
                             &data_blocks) == -1) {
                error("invalid number of verity data blocks: %s",
                    token.c_str());
                return -1;
            }
            break;
        case 6: /* hash_start_block */
            if (parse_uint64(token.c_str(), f->data_size / FEC_BLOCKSIZE,
                             &hash_start) == -1) {
                error("invalid verity hash start block: %s", token.c_str());
                return -1;
            }

            hash_start *= FEC_BLOCKSIZE;
            break;
        case 7: /* algorithm */
            if (token != "sha256") {
                error("unsupported verity hash algorithm: %s", token.c_str());
                return -1;
            }
            break;
        case 8: /* digest */
            if (parse_hex(root, sizeof(root), token.c_str()) == -1) {
                error("invalid verity root hash: %s", token.c_str());
                return -1;
            }
            break;
        case 9: /* salt */
        {
            uint32_t salt_size = token.size();
            check(salt_size % 2 == 0);
            salt_size /= 2;

            salt.resize(salt_size, 0);

            if (parse_hex(salt.data(), salt_size, token.c_str()) == -1) {
                error("invalid verity salt: %s", token.c_str());
                return -1;
            }
            break;
        }
        default:
            break;
        }
    }

    if (i < VERITY_TABLE_ARGS) {
        error("not enough arguments in verity table: %d; expected at least "
            stringify(VERITY_TABLE_ARGS), i);
        return -1;
    }

    check(hash_start < f->data_size);

    verity_info *v = &f->verity;
    if (v->metadata_start < hash_start) {
        check(data_blocks == v->metadata_start / FEC_BLOCKSIZE);
    } else {
        check(data_blocks == hash_start / FEC_BLOCKSIZE);
    }

    v->table = std::move(table);

    v->hashtree.initialize(hash_start, data_blocks, salt, NID_sha256);
    if (!(f->flags & FEC_VERITY_DISABLE)) {
        if (v->hashtree.verify_tree(f, root) == -1) {
            return -1;
        }

        check(!v->hashtree.hash_data.empty());
        check(!v->hashtree.zero_hash.empty());
    }

    return 0;
}

/* rewrites verity metadata block using error corrected data in `f->verity' */
static int rewrite_metadata(fec_handle *f, uint64_t offset)
{
    check(f);
    check(f->data_size > VERITY_METADATA_SIZE);
    check(offset <= f->data_size - VERITY_METADATA_SIZE);

    std::unique_ptr<uint8_t[]> metadata(
        new (std::nothrow) uint8_t[VERITY_METADATA_SIZE]);

    if (!metadata) {
        errno = ENOMEM;
        return -1;
    }

    memset(metadata.get(), 0, VERITY_METADATA_SIZE);

    verity_info *v = &f->verity;
    memcpy(metadata.get(), &v->header, sizeof(v->header));

    check(!v->table.empty());
    size_t len = v->table.size();

    check(sizeof(v->header) + len <= VERITY_METADATA_SIZE);
    memcpy(metadata.get() + sizeof(v->header), v->table.data(), len);

    return raw_pwrite(f->fd, metadata.get(), VERITY_METADATA_SIZE, offset);
}

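/* sanity checks a verity header read from `offset': the magic, version, and
   table length must be plausible for a file of `f->data_size' bytes */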
static int validate_header(const fec_handle *f, const verity_header *header,
        uint64_t offset)
{
    check(f);
    check(header);

    if (header->magic != VERITY_MAGIC &&
        header->magic != VERITY_MAGIC_DISABLE) {
        return -1;
    }

    if (header->version != VERITY_VERSION) {
        error("unsupported verity version %u", header->version);
        return -1;
    }

    if (header->length < VERITY_MIN_TABLE_SIZE ||
        header->length > VERITY_MAX_TABLE_SIZE) {
        error("invalid verity table size: %u; expected ["
            stringify(VERITY_MIN_TABLE_SIZE) ", "
            stringify(VERITY_MAX_TABLE_SIZE) "]", header->length);
        return -1;
    }

    /* the signature is skipped, because for our purposes it doesn't matter
       where the data originates; the caller of the library is responsible
       for signature verification */

    if (offset > UINT64_MAX - header->length) {
        error("invalid verity table length: %u", header->length);
        return -1;
    } else if (offset + header->length >= f->data_size) {
        error("invalid verity table length: %u", header->length);
        return -1;
    }

    return 0;
}

/* attempts to read verity metadata from `f->fd' position `offset'; if in r/w
   mode, rewrites the metadata if it had errors */
int verity_parse_header(fec_handle *f, uint64_t offset)
{
    check(f);
    check(f->data_size > VERITY_METADATA_SIZE);

    if (offset > f->data_size - VERITY_METADATA_SIZE) {
        debug("failed to read verity header: offset %" PRIu64 " is too far",
            offset);
        return -1;
    }

    verity_info *v = &f->verity;
    uint64_t errors = f->errors;

    if (!raw_pread(f->fd, &v->header, sizeof(v->header), offset)) {
        error("failed to read verity header: %s", strerror(errno));
        return -1;
    }

    /* use raw data to check for the alternative magic, because it would
       otherwise be error corrected to VERITY_MAGIC */
    if (v->header.magic == VERITY_MAGIC_DISABLE) {
        /* this value is not used by us, but can be used by a caller to
           decide whether dm-verity should be enabled */
        v->disabled = true;
    }

    if (fec_pread(f, &v->ecc_header, sizeof(v->ecc_header), offset) !=
            sizeof(v->ecc_header)) {
        warn("failed to read verity header: %s", strerror(errno));
        return -1;
    }

    if (validate_header(f, &v->header, offset)) {
        /* raw verity header is invalid; this could be due to corruption, or
           due to missing verity metadata */

        if (validate_header(f, &v->ecc_header, offset)) {
            return -1; /* either way, we cannot recover */
        }

        /* report mismatching fields */
        if (!v->disabled && v->header.magic != v->ecc_header.magic) {
            warn("corrected verity header magic");
            v->header.magic = v->ecc_header.magic;
        }

        if (v->header.version != v->ecc_header.version) {
            warn("corrected verity header version");
            v->header.version = v->ecc_header.version;
        }

        if (v->header.length != v->ecc_header.length) {
            warn("corrected verity header length");
            v->header.length = v->ecc_header.length;
        }

        if (memcmp(v->header.signature, v->ecc_header.signature,
                sizeof(v->header.signature))) {
            warn("corrected verity header signature");
            /* we have no way of knowing which signature is correct, if either
               of them is */
        }
    }

    v->metadata_start = offset;

    if (parse_table(f, offset + sizeof(v->header), v->header.length,
            false) == -1 &&
        parse_table(f, offset + sizeof(v->header), v->header.length,
            true) == -1) {
        return -1;
    }

    /* if we corrected something while parsing metadata and we are in r/w
       mode, rewrite the corrected metadata */
    if (f->mode & O_RDWR && f->errors > errors &&
            rewrite_metadata(f, offset) < 0) {
        warn("failed to rewrite verity metadata: %s", strerror(errno));
    }

    if (v->metadata_start < v->hashtree.hash_start) {
        f->data_size = v->metadata_start;
    } else {
        f->data_size = v->hashtree.hash_start;
    }

    return 0;
}

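/* enables or disables dm-verity for the image by rewriting the verity magic
   in the metadata header; requires a writable handle and existing metadata */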
int fec_verity_set_status(struct fec_handle *f, bool enabled)
{
    check(f);

    if (!(f->mode & O_RDWR)) {
        error("cannot update verity magic: read-only handle");
        errno = EBADF;
        return -1;
    }

    verity_info *v = &f->verity;

    if (!v->metadata_start) {
        error("cannot update verity magic: no metadata found");
        errno = EINVAL;
        return -1;
    }

    if (v->disabled == !enabled) {
        return 0; /* nothing to do */
    }

    uint32_t magic = enabled ? VERITY_MAGIC : VERITY_MAGIC_DISABLE;

    if (!raw_pwrite(f->fd, &magic, sizeof(magic), v->metadata_start)) {
        error("failed to update verity magic to %08x: %s", magic,
              strerror(errno));
        return -1;
    }

    warn("updated verity magic to %08x (%s)", magic,
        enabled ? "enabled" : "disabled");
    v->disabled = !enabled;

    return 0;
}