Lines matching full:ic (occurrences of the identifier ic in drivers/md/dm-integrity.c)
89 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) argument
267 struct dm_integrity_c *ic; member
285 struct dm_integrity_c *ic; member
335 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) in dm_integrity_io_error() argument
338 atomic64_inc(&ic->number_of_mismatches); in dm_integrity_io_error()
339 if (!cmpxchg(&ic->failed, 0, err)) in dm_integrity_io_error()
343 static int dm_integrity_failed(struct dm_integrity_c *ic) in dm_integrity_failed() argument
345 return READ_ONCE(ic->failed); in dm_integrity_failed()
348 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, in dm_integrity_commit_id() argument
355 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); in dm_integrity_commit_id()
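A minimal userspace sketch of the commit-id derivation above, assuming the driver's four per-device random seeds (ic->commit_ids) and dropping the little-endian annotations; the function name and simplified types are illustrative:

#include <stdint.h>

/* Each journal sector stores a commit_id: a per-sequence seed XORed with
 * the sector's (section, sector-in-section) coordinates. Replay uses it to
 * recover the active sequence and to detect torn or stale sectors. */
static uint64_t commit_id(const uint64_t seeds[4], unsigned section,
                          unsigned sector, unsigned seq)
{
        return seeds[seq] ^ (((uint64_t)section << 32) ^ sector);
}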
358 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, in get_area_and_offset() argument
361 if (!ic->meta_dev) { in get_area_and_offset()
362 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; in get_area_and_offset()
371 #define sector_to_block(ic, n) \ argument
373 BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
374 (n) >>= (ic)->sb->log2_sectors_per_block; \
377 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, in get_metadata_sector_and_offset() argument
383 ms = area << ic->sb->log2_interleave_sectors; in get_metadata_sector_and_offset()
384 if (likely(ic->log2_metadata_run >= 0)) in get_metadata_sector_and_offset()
385 ms += area << ic->log2_metadata_run; in get_metadata_sector_and_offset()
387 ms += area * ic->metadata_run; in get_metadata_sector_and_offset()
388 ms >>= ic->log2_buffer_sectors; in get_metadata_sector_and_offset()
390 sector_to_block(ic, offset); in get_metadata_sector_and_offset()
392 if (likely(ic->log2_tag_size >= 0)) { in get_metadata_sector_and_offset()
393 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); in get_metadata_sector_and_offset()
394 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
396 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); in get_metadata_sector_and_offset()
397 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
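A hedged sketch of the general branch above (non-power-of-two tag size): the absolute tag byte of a data block splits into a metadata-buffer index and a byte offset inside that buffer. buffer_bytes stands in for 1U << SECTOR_SHIFT << log2_buffer_sectors, and the function name is illustrative:

#include <stdint.h>

static uint64_t tag_location(uint64_t block, unsigned tag_size,
                             unsigned buffer_bytes, unsigned *byte_offset)
{
        uint64_t byte = block * tag_size;  /* absolute byte in the tag area */

        *byte_offset = (unsigned)(byte % buffer_bytes);
        return byte / buffer_bytes;        /* which bufio buffer holds it */
}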
403 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) in get_data_sector() argument
407 if (ic->meta_dev) in get_data_sector()
410 result = area << ic->sb->log2_interleave_sectors; in get_data_sector()
411 if (likely(ic->log2_metadata_run >= 0)) in get_data_sector()
412 result += (area + 1) << ic->log2_metadata_run; in get_data_sector()
414 result += (area + 1) * ic->metadata_run; in get_data_sector()
416 result += (sector_t)ic->initial_sectors + offset; in get_data_sector()
417 result += ic->start; in get_data_sector()
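A userspace sketch of the interleaved layout this function walks when data and metadata share one device: after the superblock and journal (initial_sectors) come alternating metadata runs and data areas of 2^log2_interleave_sectors sectors each; the driver additionally adds ic->start, omitted here:

#include <stdint.h>

static uint64_t data_sector(uint64_t logical, unsigned log2_interleave,
                            uint64_t metadata_run, uint64_t initial_sectors)
{
        uint64_t area   = logical >> log2_interleave;
        uint64_t offset = logical & ((UINT64_C(1) << log2_interleave) - 1);

        return (area << log2_interleave)   /* data areas before this one */
             + (area + 1) * metadata_run   /* metadata runs through this area */
             + initial_sectors + offset;
}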
422 static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr) in wraparound_section() argument
424 if (unlikely(*sec_ptr >= ic->journal_sections)) in wraparound_section()
425 *sec_ptr -= ic->journal_sections; in wraparound_section()
428 static void sb_set_version(struct dm_integrity_c *ic) in sb_set_version() argument
430 if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in sb_set_version()
431 ic->sb->version = SB_VERSION_2; in sb_set_version()
433 ic->sb->version = SB_VERSION_1; in sb_set_version()
436 static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) in sync_rw_sb() argument
444 io_req.mem.ptr.addr = ic->sb; in sync_rw_sb()
446 io_req.client = ic->io; in sync_rw_sb()
447 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; in sync_rw_sb()
448 io_loc.sector = ic->start; in sync_rw_sb()
454 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset, in access_journal_check() argument
458 unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors; in access_journal_check()
460 if (unlikely(section >= ic->journal_sections) || in access_journal_check()
463 function, section, offset, ic->journal_sections, limit); in access_journal_check()
469 static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset, in page_list_location() argument
474 access_journal_check(ic, section, offset, false, "page_list_location"); in page_list_location()
476 sector = section * ic->journal_section_sectors + offset; in page_list_location()
482 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, in access_page_list() argument
488 page_list_location(ic, section, offset, &pl_index, &pl_offset); in access_page_list()
498 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned … in access_journal() argument
500 return access_page_list(ic, ic->journal, section, offset, NULL); in access_journal()
503 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsi… in access_journal_entry() argument
508 access_journal_check(ic, section, n, true, "access_journal_entry"); in access_journal_entry()
513 js = access_journal(ic, section, rel_sector); in access_journal_entry()
514 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); in access_journal_entry()
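A short sketch of the placement rule behind the accessor above: entry descriptors are striped round-robin across the section's metadata sectors (JOURNAL_BLOCK_SECTORS is 8 in this driver), so entry n lives in sector n % 8 at slot n / 8:

enum { JOURNAL_BLOCK_SECTORS = 8 };

static void entry_slot(unsigned n, unsigned *rel_sector, unsigned *slot)
{
        *rel_sector = n % JOURNAL_BLOCK_SECTORS;
        *slot       = n / JOURNAL_BLOCK_SECTORS;
}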
517 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsi… in access_journal_data() argument
519 n <<= ic->sb->log2_sectors_per_block; in access_journal_data()
523 access_journal_check(ic, section, n, false, "access_journal_data"); in access_journal_data()
525 return access_journal(ic, section, n); in access_journal_data()
528 static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE]) in section_mac() argument
530 SHASH_DESC_ON_STACK(desc, ic->journal_mac); in section_mac()
534 desc->tfm = ic->journal_mac; in section_mac()
539 dm_integrity_io_error(ic, "crypto_shash_init", r); in section_mac()
543 for (j = 0; j < ic->journal_section_entries; j++) { in section_mac()
544 struct journal_entry *je = access_journal_entry(ic, section, j); in section_mac()
547 dm_integrity_io_error(ic, "crypto_shash_update", r); in section_mac()
552 size = crypto_shash_digestsize(ic->journal_mac); in section_mac()
557 dm_integrity_io_error(ic, "crypto_shash_final", r); in section_mac()
565 dm_integrity_io_error(ic, "crypto_shash_final", r); in section_mac()
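The two crypto_shash_final() branches above fit an arbitrary digest size into the fixed MAC field; a hedged userspace restatement (JOURNAL_MAC_SIZE is 8 in this driver; fit_mac is an illustrative name):

#include <string.h>

enum { JOURNAL_MAC_SIZE = 8 };

static void fit_mac(unsigned char result[JOURNAL_MAC_SIZE],
                    const unsigned char *digest, unsigned digest_size)
{
        if (digest_size <= JOURNAL_MAC_SIZE) {
                memcpy(result, digest, digest_size);
                memset(result + digest_size, 0, JOURNAL_MAC_SIZE - digest_size);
        } else {
                memcpy(result, digest, JOURNAL_MAC_SIZE);  /* truncate */
        }
}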
576 static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr) in rw_section_mac() argument
581 if (!ic->journal_mac) in rw_section_mac()
584 section_mac(ic, section, result); in rw_section_mac()
587 struct journal_sector *js = access_journal(ic, section, j); in rw_section_mac()
593 dm_integrity_io_error(ic, "journal mac", -EILSEQ); in rw_section_mac()
606 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, in xor_journal() argument
610 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; in xor_journal()
615 source_pl = ic->journal; in xor_journal()
616 target_pl = ic->journal_io; in xor_journal()
618 source_pl = ic->journal_io; in xor_journal()
619 target_pl = ic->journal; in xor_journal()
622 page_list_location(ic, section, 0, &pl_index, &pl_offset); in xor_journal()
638 rw_section_mac(ic, section, true); in xor_journal()
643 page_list_location(ic, section, 0, &section_index, &dummy); in xor_journal()
649 src_pages[1] = ic->journal_xor[pl_index].page; in xor_journal()
668 complete(&comp->ic->crypto_backoff); in complete_journal_encrypt()
671 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); in complete_journal_encrypt()
690 wait_for_completion(&comp->ic->crypto_backoff); in do_crypt()
691 reinit_completion(&comp->ic->crypto_backoff); in do_crypt()
694 dm_integrity_io_error(comp->ic, "encrypt", r); in do_crypt()
698 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, in crypt_journal() argument
707 source_sg = ic->journal_scatterlist; in crypt_journal()
708 target_sg = ic->journal_io_scatterlist; in crypt_journal()
710 source_sg = ic->journal_io_scatterlist; in crypt_journal()
711 target_sg = ic->journal_scatterlist; in crypt_journal()
720 rw_section_mac(ic, section, true); in crypt_journal()
722 req = ic->sk_requests[section]; in crypt_journal()
723 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); in crypt_journal()
742 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, in encrypt_journal() argument
745 if (ic->journal_xor) in encrypt_journal()
746 return xor_journal(ic, encrypt, section, n_sections, comp); in encrypt_journal()
748 return crypt_journal(ic, encrypt, section, n_sections, comp); in encrypt_journal()
755 dm_integrity_io_error(comp->ic, "writing journal", -EIO); in complete_journal_io()
759 static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, in rw_journal() argument
767 if (unlikely(dm_integrity_failed(ic))) { in rw_journal()
773 sector = section * ic->journal_section_sectors; in rw_journal()
774 n_sectors = n_sections * ic->journal_section_sectors; in rw_journal()
782 if (ic->journal_io) in rw_journal()
783 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; in rw_journal()
785 io_req.mem.ptr.pl = &ic->journal[pl_index]; in rw_journal()
793 io_req.client = ic->io; in rw_journal()
794 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; in rw_journal()
795 io_loc.sector = ic->start + SB_SECTORS + sector; in rw_journal()
800 dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r); in rw_journal()
808 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_section… in write_journal() argument
815 io_comp.ic = ic; in write_journal()
818 if (commit_start + commit_sections <= ic->journal_sections) { in write_journal()
820 if (ic->journal_io) { in write_journal()
821 crypt_comp_1.ic = ic; in write_journal()
824 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); in write_journal()
828 rw_section_mac(ic, commit_start + i, true); in write_journal()
830 rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, in write_journal()
835 to_end = ic->journal_sections - commit_start; in write_journal()
836 if (ic->journal_io) { in write_journal()
837 crypt_comp_1.ic = ic; in write_journal()
840 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); in write_journal()
842 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
845 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); in write_journal()
848 crypt_comp_2.ic = ic; in write_journal()
851 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); in write_journal()
853 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
858 rw_section_mac(ic, commit_start + i, true); in write_journal()
859 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
861 rw_section_mac(ic, i, true); in write_journal()
863 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); in write_journal()
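A sketch of the wraparound split visible above: a commit range crossing the end of the circular journal is issued as two writes, [commit_start, journal_sections) and [0, remainder); struct span and the helper name are illustrative:

struct span { unsigned start, count; };

static void split_commit(unsigned start, unsigned count,
                         unsigned journal_sections, struct span out[2])
{
        if (start + count <= journal_sections) {
                out[0] = (struct span){ start, count };
                out[1] = (struct span){ 0, 0 };
        } else {
                unsigned to_end = journal_sections - start;

                out[0] = (struct span){ start, to_end };
                out[1] = (struct span){ 0, count - to_end };
        }
}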
869 static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset, in copy_from_journal() argument
877 BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1)); in copy_from_journal()
879 if (unlikely(dm_integrity_failed(ic))) { in copy_from_journal()
884 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; in copy_from_journal()
892 io_req.mem.ptr.pl = &ic->journal[pl_index]; in copy_from_journal()
896 io_req.client = ic->io; in copy_from_journal()
897 io_loc.bdev = ic->dev->bdev; in copy_from_journal()
914 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool che… in add_new_range() argument
916 struct rb_node **n = &ic->in_progress.rb_node; in add_new_range()
919 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1)); in add_new_range()
923 list_for_each_entry(range, &ic->wait_list, wait_entry) { in add_new_range()
945 rb_insert_color(&new_range->node, &ic->in_progress); in add_new_range()
950 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) in remove_range_unlocked() argument
952 rb_erase(&range->node, &ic->in_progress); in remove_range_unlocked()
953 while (unlikely(!list_empty(&ic->wait_list))) { in remove_range_unlocked()
955 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); in remove_range_unlocked()
959 if (!add_new_range(ic, last_range, false)) { in remove_range_unlocked()
961 list_add(&last_range->wait_entry, &ic->wait_list); in remove_range_unlocked()
969 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) in remove_range() argument
973 spin_lock_irqsave(&ic->endio_wait.lock, flags); in remove_range()
974 remove_range_unlocked(ic, range); in remove_range()
975 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in remove_range()
978 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) in wait_and_add_new_range() argument
981 list_add_tail(&new_range->wait_entry, &ic->wait_list); in wait_and_add_new_range()
985 spin_unlock_irq(&ic->endio_wait.lock); in wait_and_add_new_range()
987 spin_lock_irq(&ic->endio_wait.lock); in wait_and_add_new_range()
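The range machinery above serializes conflicting I/O: in-flight ranges sit in the ic->in_progress rb-tree, and a bio whose range overlaps one of them parks on ic->wait_list until remove_range() retries it. The overlap test, as a standalone sketch with simplified types:

#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t logical_sector, n_sectors; };

static bool ranges_overlap(const struct range *a, const struct range *b)
{
        return a->logical_sector < b->logical_sector + b->n_sectors &&
               b->logical_sector < a->logical_sector + a->n_sectors;
}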
997 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) in add_journal_node() argument
1005 link = &ic->journal_tree_root.rb_node; in add_journal_node()
1019 rb_insert_color(&node->node, &ic->journal_tree_root); in add_journal_node()
1022 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) in remove_journal_node() argument
1025 rb_erase(&node->node, &ic->journal_tree_root); in remove_journal_node()
1031 static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) in find_journal_node() argument
1033 struct rb_node *n = ic->journal_tree_root.rb_node; in find_journal_node()
1039 found = j - ic->journal_tree; in find_journal_node()
1052 static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector) in test_journal_node() argument
1057 if (unlikely(pos >= ic->journal_entries)) in test_journal_node()
1059 node = &ic->journal_tree[pos]; in test_journal_node()
1073 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) in find_newer_committed_node() argument
1090 next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries; in find_newer_committed_node()
1091 if (next_section >= ic->committed_section && in find_newer_committed_node()
1092 next_section < ic->committed_section + ic->n_committed_sections) in find_newer_committed_node()
1094 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) in find_newer_committed_node()
1104 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_bl… in dm_integrity_rw_tag() argument
1113 r = dm_integrity_failed(ic); in dm_integrity_rw_tag()
1117 data = dm_bufio_read(ic->bufio, *metadata_block, &b); in dm_integrity_rw_tag()
1121 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); in dm_integrity_rw_tag()
1146 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { in dm_integrity_rw_tag()
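A hedged sketch of the cursor walk in dm_integrity_rw_tag() above: one request's tags may straddle several metadata buffers, so each pass consumes at most the rest of the current buffer before advancing the (metadata_block, metadata_offset) cursor. map_buffer is a hypothetical stand-in for dm_bufio_read(); only the TAG_WRITE direction is shown:

#include <stdint.h>
#include <string.h>

static void walk_tags(const unsigned char *tags, unsigned total_size,
                      uint64_t *metadata_block, unsigned *metadata_offset,
                      unsigned buffer_bytes,
                      unsigned char *(*map_buffer)(uint64_t block))
{
        while (total_size) {
                unsigned to_copy = buffer_bytes - *metadata_offset;

                if (to_copy > total_size)
                        to_copy = total_size;
                memcpy(map_buffer(*metadata_block) + *metadata_offset,
                       tags, to_copy);
                tags += to_copy;
                total_size -= to_copy;
                *metadata_offset += to_copy;
                if (*metadata_offset == buffer_bytes) {
                        *metadata_offset = 0;
                        (*metadata_block)++;
                }
        }
}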
1156 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) in dm_integrity_flush_buffers() argument
1159 r = dm_bufio_write_dirty_buffers(ic->bufio); in dm_integrity_flush_buffers()
1161 dm_integrity_io_error(ic, "writing tags", r); in dm_integrity_flush_buffers()
1164 static void sleep_on_endio_wait(struct dm_integrity_c *ic) in sleep_on_endio_wait() argument
1167 __add_wait_queue(&ic->endio_wait, &wait); in sleep_on_endio_wait()
1169 spin_unlock_irq(&ic->endio_wait.lock); in sleep_on_endio_wait()
1171 spin_lock_irq(&ic->endio_wait.lock); in sleep_on_endio_wait()
1172 __remove_wait_queue(&ic->endio_wait, &wait); in sleep_on_endio_wait()
1177 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer); in autocommit_fn() local
1179 if (likely(!dm_integrity_failed(ic))) in autocommit_fn()
1180 queue_work(ic->commit_wq, &ic->commit_work); in autocommit_fn()
1183 static void schedule_autocommit(struct dm_integrity_c *ic) in schedule_autocommit() argument
1185 if (!timer_pending(&ic->autocommit_timer)) in schedule_autocommit()
1186 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); in schedule_autocommit()
1189 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in submit_flush_bio() argument
1194 spin_lock_irqsave(&ic->endio_wait.lock, flags); in submit_flush_bio()
1196 bio_list_add(&ic->flush_bio_list, bio); in submit_flush_bio()
1197 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in submit_flush_bio()
1199 queue_work(ic->commit_wq, &ic->commit_work); in submit_flush_bio()
1202 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) in do_endio() argument
1204 int r = dm_integrity_failed(ic); in do_endio()
1210 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in do_endio_flush() argument
1214 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) in do_endio_flush()
1215 submit_flush_bio(ic, dio); in do_endio_flush()
1217 do_endio(ic, bio); in do_endio_flush()
1223 struct dm_integrity_c *ic = dio->ic; in dec_in_flight() local
1226 remove_range(ic, &dio->range); in dec_in_flight()
1229 schedule_autocommit(ic); in dec_in_flight()
1239 queue_work(ic->offload_wq, &dio->work); in dec_in_flight()
1242 do_endio_flush(ic, dio); in dec_in_flight()
1260 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, in integrity_sector_checksum() argument
1264 SHASH_DESC_ON_STACK(req, ic->internal_hash); in integrity_sector_checksum()
1268 req->tfm = ic->internal_hash; in integrity_sector_checksum()
1273 dm_integrity_io_error(ic, "crypto_shash_init", r); in integrity_sector_checksum()
1279 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1283 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); in integrity_sector_checksum()
1285 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1291 dm_integrity_io_error(ic, "crypto_shash_final", r); in integrity_sector_checksum()
1295 digest_size = crypto_shash_digestsize(ic->internal_hash); in integrity_sector_checksum()
1296 if (unlikely(digest_size < ic->tag_size)) in integrity_sector_checksum()
1297 memset(result + digest_size, 0, ic->tag_size - digest_size); in integrity_sector_checksum()
1303 get_random_bytes(result, ic->tag_size); in integrity_sector_checksum()
1309 struct dm_integrity_c *ic = dio->ic; in integrity_metadata() local
1313 if (ic->internal_hash) { in integrity_metadata()
1316 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); in integrity_metadata()
1319 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; in integrity_metadata()
1320 char checksums_onstack[ic->tag_size + extra_space]; in integrity_metadata()
1324 if (unlikely(ic->mode == 'R')) in integrity_metadata()
1327 …checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size +… in integrity_metadata()
1341 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); in integrity_metadata()
1342 checksums_ptr += ic->tag_size; in integrity_metadata()
1343 sectors_to_process -= ic->sectors_per_block; in integrity_metadata()
1344 pos += ic->sectors_per_block << SECTOR_SHIFT; in integrity_metadata()
1345 sector += ic->sectors_per_block; in integrity_metadata()
1349 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1354 (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); in integrity_metadata()
1356 atomic64_inc(&ic->number_of_mismatches); in integrity_metadata()
1382 sector_to_block(ic, data_to_process); in integrity_metadata()
1383 data_to_process *= ic->tag_size; in integrity_metadata()
1392 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1412 struct dm_integrity_c *ic = ti->private; in dm_integrity_map() local
1418 dio->ic = ic; in dm_integrity_map()
1422 submit_flush_bio(ic, dio); in dm_integrity_map()
1436 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { in dm_integrity_map()
1439 (unsigned long long)ic->provided_data_sectors); in dm_integrity_map()
1442 …if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1… in dm_integrity_map()
1444 ic->sectors_per_block, in dm_integrity_map()
1449 if (ic->sectors_per_block > 1) { in dm_integrity_map()
1453 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { in dm_integrity_map()
1455 bv.bv_offset, bv.bv_len, ic->sectors_per_block); in dm_integrity_map()
1462 if (!ic->internal_hash) { in dm_integrity_map()
1464 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; in dm_integrity_map()
1465 if (ic->log2_tag_size >= 0) in dm_integrity_map()
1466 wanted_tag_size <<= ic->log2_tag_size; in dm_integrity_map()
1468 wanted_tag_size *= ic->tag_size; in dm_integrity_map()
1481 if (unlikely(ic->mode == 'R') && unlikely(dio->write)) in dm_integrity_map()
1484 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in dm_integrity_map()
1485 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in dm_integrity_map()
1486 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); in dm_integrity_map()
1495 struct dm_integrity_c *ic = dio->ic; in __journal_read_write() local
1515 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); in __journal_read_write()
1526 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); in __journal_read_write()
1531 js = access_journal_data(ic, journal_section, journal_entry); in __journal_read_write()
1539 } while (++s < ic->sectors_per_block); in __journal_read_write()
1541 if (ic->internal_hash) { in __journal_read_write()
1542 char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; in __journal_read_write()
1544 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); in __journal_read_write()
1545 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { in __journal_read_write()
1553 if (!ic->internal_hash) { in __journal_read_write()
1555 unsigned tag_todo = ic->tag_size; in __journal_read_write()
1556 char *tag_ptr = journal_entry_tag(ic, je); in __journal_read_write()
1581 js = access_journal_data(ic, journal_section, journal_entry); in __journal_read_write()
1582 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
1587 } while (++s < ic->sectors_per_block); in __journal_read_write()
1589 if (ic->internal_hash) { in __journal_read_write()
1590 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); in __journal_read_write()
1591 if (unlikely(digest_size > ic->tag_size)) { in __journal_read_write()
1593 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); in __journal_read_write()
1594 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); in __journal_read_write()
1596 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); in __journal_read_write()
1601 logical_sector += ic->sectors_per_block; in __journal_read_write()
1604 if (unlikely(journal_entry == ic->journal_section_entries)) { in __journal_read_write()
1607 wraparound_section(ic, &journal_section); in __journal_read_write()
1610 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; in __journal_read_write()
1611 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
1620 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) in __journal_read_write()
1621 wake_up(&ic->copy_to_journal_wait); in __journal_read_write()
1622 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { in __journal_read_write()
1623 queue_work(ic->commit_wq, &ic->commit_work); in __journal_read_write()
1625 schedule_autocommit(ic); in __journal_read_write()
1628 remove_range(ic, &dio->range); in __journal_read_write()
1635 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in __journal_read_write()
1636 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in __journal_read_write()
1645 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_continue() local
1650 bool need_sync_io = ic->internal_hash && !dio->write; in dm_integrity_map_continue()
1654 queue_work(ic->offload_wq, &dio->work); in dm_integrity_map_continue()
1659 spin_lock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1661 if (unlikely(dm_integrity_failed(ic))) { in dm_integrity_map_continue()
1662 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1663 do_endio(ic, bio); in dm_integrity_map_continue()
1668 if (likely(ic->mode == 'J')) { in dm_integrity_map_continue()
1674 ic->free_sectors << ic->sb->log2_sectors_per_block); in dm_integrity_map_continue()
1678 sleep_on_endio_wait(ic); in dm_integrity_map_continue()
1681 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; in dm_integrity_map_continue()
1682 ic->free_sectors -= range_sectors; in dm_integrity_map_continue()
1683 journal_section = ic->free_section; in dm_integrity_map_continue()
1684 journal_entry = ic->free_section_entry; in dm_integrity_map_continue()
1686 next_entry = ic->free_section_entry + range_sectors; in dm_integrity_map_continue()
1687 ic->free_section_entry = next_entry % ic->journal_section_entries; in dm_integrity_map_continue()
1688 ic->free_section += next_entry / ic->journal_section_entries; in dm_integrity_map_continue()
1689 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; in dm_integrity_map_continue()
1690 wraparound_section(ic, &ic->free_section); in dm_integrity_map_continue()
1692 pos = journal_section * ic->journal_section_entries + journal_entry; in dm_integrity_map_continue()
1699 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); in dm_integrity_map_continue()
1701 if (unlikely(pos >= ic->journal_entries)) in dm_integrity_map_continue()
1704 je = access_journal_entry(ic, ws, we); in dm_integrity_map_continue()
1708 if (unlikely(we == ic->journal_section_entries)) { in dm_integrity_map_continue()
1711 wraparound_section(ic, &ws); in dm_integrity_map_continue()
1713 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); in dm_integrity_map_continue()
1715 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1719 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
1726 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { in dm_integrity_map_continue()
1727 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) in dm_integrity_map_continue()
1734 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_continue()
1742 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1744 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_continue()
1748 dio->range.n_sectors = ic->sectors_per_block; in dm_integrity_map_continue()
1749 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_continue()
1757 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
1759 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
1764 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1767 journal_section = journal_read_pos / ic->journal_section_entries; in dm_integrity_map_continue()
1768 journal_entry = journal_read_pos % ic->journal_section_entries; in dm_integrity_map_continue()
1781 bio_set_dev(bio, ic->dev->bdev); in dm_integrity_map_continue()
1791 if (unlikely(ic->recalc_wq != NULL) && in dm_integrity_map_continue()
1792 ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_map_continue()
1793 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) in dm_integrity_map_continue()
1803 queue_work(ic->metadata_wq, &dio->work); in dm_integrity_map_continue()
1812 do_endio_flush(ic, dio); in dm_integrity_map_continue()
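A sketch of the journal-entry allocation done under endio_wait.lock above; range_sectors is the request's block count (n_sectors >> log2_sectors_per_block), and the cursor wraps around the circular journal exactly as wraparound_section() does:

static void alloc_journal_entries(unsigned *free_section,
                                  unsigned *free_section_entry,
                                  unsigned *n_uncommitted_sections,
                                  unsigned range_sectors,
                                  unsigned journal_section_entries,
                                  unsigned journal_sections)
{
        unsigned next_entry = *free_section_entry + range_sectors;

        *free_section_entry = next_entry % journal_section_entries;
        *free_section += next_entry / journal_section_entries;
        *n_uncommitted_sections += next_entry / journal_section_entries;
        if (*free_section >= journal_sections)  /* wraparound_section() */
                *free_section -= journal_sections;
}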
1823 static void pad_uncommitted(struct dm_integrity_c *ic) in pad_uncommitted() argument
1825 if (ic->free_section_entry) { in pad_uncommitted()
1826 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; in pad_uncommitted()
1827 ic->free_section_entry = 0; in pad_uncommitted()
1828 ic->free_section++; in pad_uncommitted()
1829 wraparound_section(ic, &ic->free_section); in pad_uncommitted()
1830 ic->n_uncommitted_sections++; in pad_uncommitted()
1832 WARN_ON(ic->journal_sections * ic->journal_section_entries != in pad_uncommitted()
1833 …(ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_s… in pad_uncommitted()
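The WARN_ON above states the journal accounting invariant; restated as a standalone assertion (free_sectors counts free journal entries, one per data block):

#include <assert.h>

static void check_journal_accounting(unsigned journal_sections,
                                     unsigned journal_section_entries,
                                     unsigned n_uncommitted_sections,
                                     unsigned n_committed_sections,
                                     unsigned free_sectors)
{
        assert(journal_sections * journal_section_entries ==
               (n_uncommitted_sections + n_committed_sections) *
               journal_section_entries + free_sectors);
}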
1838 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); in integrity_commit() local
1843 del_timer(&ic->autocommit_timer); in integrity_commit()
1845 spin_lock_irq(&ic->endio_wait.lock); in integrity_commit()
1846 flushes = bio_list_get(&ic->flush_bio_list); in integrity_commit()
1847 if (unlikely(ic->mode != 'J')) { in integrity_commit()
1848 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
1849 dm_integrity_flush_buffers(ic); in integrity_commit()
1853 pad_uncommitted(ic); in integrity_commit()
1854 commit_start = ic->uncommitted_section; in integrity_commit()
1855 commit_sections = ic->n_uncommitted_sections; in integrity_commit()
1856 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
1863 for (j = 0; j < ic->journal_section_entries; j++) { in integrity_commit()
1865 je = access_journal_entry(ic, i, j); in integrity_commit()
1866 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); in integrity_commit()
1868 for (j = 0; j < ic->journal_section_sectors; j++) { in integrity_commit()
1870 js = access_journal(ic, i, j); in integrity_commit()
1871 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); in integrity_commit()
1874 if (unlikely(i >= ic->journal_sections)) in integrity_commit()
1875 ic->commit_seq = next_commit_seq(ic->commit_seq); in integrity_commit()
1876 wraparound_section(ic, &i); in integrity_commit()
1880 write_journal(ic, commit_start, commit_sections); in integrity_commit()
1882 spin_lock_irq(&ic->endio_wait.lock); in integrity_commit()
1883 ic->uncommitted_section += commit_sections; in integrity_commit()
1884 wraparound_section(ic, &ic->uncommitted_section); in integrity_commit()
1885 ic->n_uncommitted_sections -= commit_sections; in integrity_commit()
1886 ic->n_committed_sections += commit_sections; in integrity_commit()
1887 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
1889 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) in integrity_commit()
1890 queue_work(ic->writer_wq, &ic->writer_work); in integrity_commit()
1896 do_endio(ic, flushes); in integrity_commit()
1905 struct dm_integrity_c *ic = comp->ic; in complete_copy_from_journal() local
1906 remove_range(ic, &io->range); in complete_copy_from_journal()
1907 mempool_free(io, &ic->journal_io_mempool); in complete_copy_from_journal()
1909 dm_integrity_io_error(ic, "copying from journal", -EIO); in complete_copy_from_journal()
1913 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, in restore_last_bytes() argument
1920 } while (++s < ic->sectors_per_block); in restore_last_bytes()
1923 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, in do_journal_write() argument
1932 comp.ic = ic; in do_journal_write()
1937 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { in do_journal_write()
1941 rw_section_mac(ic, i, false); in do_journal_write()
1942 for (j = 0; j < ic->journal_section_entries; j++) { in do_journal_write()
1943 struct journal_entry *je = access_journal_entry(ic, i, j); in do_journal_write()
1955 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) { in do_journal_write()
1956 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); in do_journal_write()
1957 sec &= ~(sector_t)(ic->sectors_per_block - 1); in do_journal_write()
1960 get_area_and_offset(ic, sec, &area, &offset); in do_journal_write()
1961 restore_last_bytes(ic, access_journal_data(ic, i, j), je); in do_journal_write()
1962 for (k = j + 1; k < ic->journal_section_entries; k++) { in do_journal_write()
1963 struct journal_entry *je2 = access_journal_entry(ic, i, k); in do_journal_write()
1969 get_area_and_offset(ic, sec2, &area2, &offset2); in do_journal_write()
1970 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) in do_journal_write()
1972 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); in do_journal_write()
1976 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); in do_journal_write()
1979 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; in do_journal_write()
1981 spin_lock_irq(&ic->endio_wait.lock); in do_journal_write()
1982 if (unlikely(!add_new_range(ic, &io->range, true))) in do_journal_write()
1983 wait_and_add_new_range(ic, &io->range); in do_journal_write()
1986 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; in do_journal_write()
1989 while (j < k && find_newer_committed_node(ic, &section_node[j])) { in do_journal_write()
1990 struct journal_entry *je2 = access_journal_entry(ic, i, j); in do_journal_write()
1993 remove_journal_node(ic, &section_node[j]); in do_journal_write()
1995 sec += ic->sectors_per_block; in do_journal_write()
1996 offset += ic->sectors_per_block; in do_journal_write()
1998 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) { in do_journal_write()
1999 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); in do_journal_write()
2002 remove_journal_node(ic, &section_node[k - 1]); in do_journal_write()
2006 remove_range_unlocked(ic, &io->range); in do_journal_write()
2007 spin_unlock_irq(&ic->endio_wait.lock); in do_journal_write()
2008 mempool_free(io, &ic->journal_io_mempool); in do_journal_write()
2012 remove_journal_node(ic, &section_node[l]); in do_journal_write()
2015 spin_unlock_irq(&ic->endio_wait.lock); in do_journal_write()
2017 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); in do_journal_write()
2020 struct journal_entry *je2 = access_journal_entry(ic, i, l); in do_journal_write()
2026 ic->internal_hash) { in do_journal_write()
2027 char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; in do_journal_write()
2029 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), in do_journal_write()
2030 (char *)access_journal_data(ic, i, l), test_tag); in do_journal_write()
2031 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) in do_journal_write()
2032 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); in do_journal_write()
2036 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, in do_journal_write()
2037 ic->tag_size, TAG_WRITE); in do_journal_write()
2039 dm_integrity_io_error(ic, "reading tags", r); in do_journal_write()
2044 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, in do_journal_write()
2045 (k - j) << ic->sb->log2_sectors_per_block, in do_journal_write()
2046 get_data_sector(ic, area, offset), in do_journal_write()
2053 dm_bufio_write_dirty_buffers_async(ic->bufio); in do_journal_write()
2060 dm_integrity_flush_buffers(ic); in do_journal_write()
2065 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); in integrity_writer() local
2071 if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) in integrity_writer()
2074 spin_lock_irq(&ic->endio_wait.lock); in integrity_writer()
2075 write_start = ic->committed_section; in integrity_writer()
2076 write_sections = ic->n_committed_sections; in integrity_writer()
2077 spin_unlock_irq(&ic->endio_wait.lock); in integrity_writer()
2082 do_journal_write(ic, write_start, write_sections, false); in integrity_writer()
2084 spin_lock_irq(&ic->endio_wait.lock); in integrity_writer()
2086 ic->committed_section += write_sections; in integrity_writer()
2087 wraparound_section(ic, &ic->committed_section); in integrity_writer()
2088 ic->n_committed_sections -= write_sections; in integrity_writer()
2090 prev_free_sectors = ic->free_sectors; in integrity_writer()
2091 ic->free_sectors += write_sections * ic->journal_section_entries; in integrity_writer()
2093 wake_up_locked(&ic->endio_wait); in integrity_writer()
2095 spin_unlock_irq(&ic->endio_wait.lock); in integrity_writer()
2098 static void recalc_write_super(struct dm_integrity_c *ic) in recalc_write_super() argument
2102 dm_integrity_flush_buffers(ic); in recalc_write_super()
2103 if (dm_integrity_failed(ic)) in recalc_write_super()
2106 sb_set_version(ic); in recalc_write_super()
2107 r = sync_rw_sb(ic, REQ_OP_WRITE, 0); in recalc_write_super()
2109 dm_integrity_io_error(ic, "writing superblock", r); in recalc_write_super()
2114 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); in integrity_recalc() local
2126 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc()
2130 if (unlikely(dm_post_suspending(ic->ti))) in integrity_recalc()
2133 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); in integrity_recalc()
2134 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) in integrity_recalc()
2137 get_area_and_offset(ic, range.logical_sector, &area, &offset); in integrity_recalc()
2138 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector); in integrity_recalc()
2139 if (!ic->meta_dev) in integrity_recalc()
2140 …range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset); in integrity_recalc()
2142 if (unlikely(!add_new_range(ic, &range, true))) in integrity_recalc()
2143 wait_and_add_new_range(ic, &range); in integrity_recalc()
2145 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc()
2148 recalc_write_super(ic); in integrity_recalc()
2152 if (unlikely(dm_integrity_failed(ic))) in integrity_recalc()
2158 io_req.mem.ptr.addr = ic->recalc_buffer; in integrity_recalc()
2160 io_req.client = ic->io; in integrity_recalc()
2161 io_loc.bdev = ic->dev->bdev; in integrity_recalc()
2162 io_loc.sector = get_data_sector(ic, area, offset); in integrity_recalc()
2167 dm_integrity_io_error(ic, "reading data", r); in integrity_recalc()
2171 t = ic->recalc_tags; in integrity_recalc()
2172 for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) { in integrity_recalc()
2173 …integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t… in integrity_recalc()
2174 t += ic->tag_size; in integrity_recalc()
2177 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); in integrity_recalc()
2179 …r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tag… in integrity_recalc()
2181 dm_integrity_io_error(ic, "writing tags", r); in integrity_recalc()
2185 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc()
2186 remove_range_unlocked(ic, &range); in integrity_recalc()
2187 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); in integrity_recalc()
2191 remove_range(ic, &range); in integrity_recalc()
2195 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc()
2197 recalc_write_super(ic); in integrity_recalc()
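A hedged skeleton of the recalculation loop above: lock a chunk as an in-progress range, read and checksum it, write the tags, then advance and persist the recalc_sector cursor. The callbacks below are hypothetical stand-ins for the driver's dm-io, checksum, and recalc_write_super() helpers:

#include <stdbool.h>
#include <stdint.h>

struct recalc_ops {
        bool (*read_and_tag)(uint64_t sector, unsigned n);  /* read data, write tags */
        void (*save_cursor)(uint64_t sector);               /* persist progress */
};

static void recalc(uint64_t cursor, uint64_t provided_data_sectors,
                   unsigned chunk_sectors, const struct recalc_ops *ops)
{
        while (cursor < provided_data_sectors) {
                uint64_t left = provided_data_sectors - cursor;
                unsigned n = left < chunk_sectors ? (unsigned)left : chunk_sectors;

                if (!ops->read_and_tag(cursor, n))
                        break;                  /* I/O error: stop recalculating */
                cursor += n;
                ops->save_cursor(cursor);       /* superblock checkpoint */
        }
}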
2200 static void init_journal(struct dm_integrity_c *ic, unsigned start_section, in init_journal() argument
2210 wraparound_section(ic, &i); in init_journal()
2211 for (j = 0; j < ic->journal_section_sectors; j++) { in init_journal()
2212 struct journal_sector *js = access_journal(ic, i, j); in init_journal()
2214 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); in init_journal()
2216 for (j = 0; j < ic->journal_section_entries; j++) { in init_journal()
2217 struct journal_entry *je = access_journal_entry(ic, i, j); in init_journal()
2222 write_journal(ic, start_section, n_sections); in init_journal()
2225 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id) in find_commit_seq() argument
2229 if (dm_integrity_commit_id(ic, i, j, k) == id) in find_commit_seq()
2232 dm_integrity_io_error(ic, "journal commit id", -EIO); in find_commit_seq()
2236 static void replay_journal(struct dm_integrity_c *ic) in replay_journal() argument
2246 if (ic->mode == 'R') in replay_journal()
2249 if (ic->journal_uptodate) in replay_journal()
2255 if (!ic->just_formatted) { in replay_journal()
2257 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); in replay_journal()
2258 if (ic->journal_io) in replay_journal()
2259 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); in replay_journal()
2260 if (ic->journal_io) { in replay_journal()
2262 crypt_comp.ic = ic; in replay_journal()
2265 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); in replay_journal()
2268 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); in replay_journal()
2271 if (dm_integrity_failed(ic)) in replay_journal()
2277 for (i = 0; i < ic->journal_sections; i++) { in replay_journal()
2278 for (j = 0; j < ic->journal_section_sectors; j++) { in replay_journal()
2280 struct journal_sector *js = access_journal(ic, i, j); in replay_journal()
2281 k = find_commit_seq(ic, i, j, js->commit_id); in replay_journal()
2288 for (j = 0; j < ic->journal_section_entries; j++) { in replay_journal()
2289 struct journal_entry *je = access_journal_entry(ic, i, j); in replay_journal()
2307 dm_integrity_io_error(ic, "journal commit ids", -EIO); in replay_journal()
2322 if (unlikely(write_start >= ic->journal_sections)) in replay_journal()
2324 wraparound_section(ic, &write_start); in replay_journal()
2327 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { in replay_journal()
2328 for (j = 0; j < ic->journal_section_sectors; j++) { in replay_journal()
2329 struct journal_sector *js = access_journal(ic, i, j); in replay_journal()
2331 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { in replay_journal()
2338 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); in replay_journal()
2343 if (unlikely(i >= ic->journal_sections)) in replay_journal()
2345 wraparound_section(ic, &i); in replay_journal()
2352 do_journal_write(ic, write_start, write_sections, true); in replay_journal()
2355 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { in replay_journal()
2357 ic->commit_seq = want_commit_seq; in replay_journal()
2358 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); in replay_journal()
2367 init_journal(ic, s, 1, erase_seq); in replay_journal()
2369 wraparound_section(ic, &s); in replay_journal()
2370 if (ic->journal_sections >= 2) { in replay_journal()
2371 init_journal(ic, s, ic->journal_sections - 2, erase_seq); in replay_journal()
2372 s += ic->journal_sections - 2; in replay_journal()
2373 wraparound_section(ic, &s); in replay_journal()
2374 init_journal(ic, s, 1, erase_seq); in replay_journal()
2378 ic->commit_seq = next_commit_seq(erase_seq); in replay_journal()
2381 ic->committed_section = continue_section; in replay_journal()
2382 ic->n_committed_sections = 0; in replay_journal()
2384 ic->uncommitted_section = continue_section; in replay_journal()
2385 ic->n_uncommitted_sections = 0; in replay_journal()
2387 ic->free_section = continue_section; in replay_journal()
2388 ic->free_section_entry = 0; in replay_journal()
2389 ic->free_sectors = ic->journal_entries; in replay_journal()
2391 ic->journal_tree_root = RB_ROOT; in replay_journal()
2392 for (i = 0; i < ic->journal_entries; i++) in replay_journal()
2393 init_journal_node(&ic->journal_tree[i]); in replay_journal()
2398 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; in dm_integrity_postsuspend() local
2400 del_timer_sync(&ic->autocommit_timer); in dm_integrity_postsuspend()
2402 if (ic->recalc_wq) in dm_integrity_postsuspend()
2403 drain_workqueue(ic->recalc_wq); in dm_integrity_postsuspend()
2405 queue_work(ic->commit_wq, &ic->commit_work); in dm_integrity_postsuspend()
2406 drain_workqueue(ic->commit_wq); in dm_integrity_postsuspend()
2408 if (ic->mode == 'J') { in dm_integrity_postsuspend()
2409 if (ic->meta_dev) in dm_integrity_postsuspend()
2410 queue_work(ic->writer_wq, &ic->writer_work); in dm_integrity_postsuspend()
2411 drain_workqueue(ic->writer_wq); in dm_integrity_postsuspend()
2412 dm_integrity_flush_buffers(ic); in dm_integrity_postsuspend()
2415 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); in dm_integrity_postsuspend()
2417 ic->journal_uptodate = true; in dm_integrity_postsuspend()
2422 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; in dm_integrity_resume() local
2424 replay_journal(ic); in dm_integrity_resume()
2426 if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in dm_integrity_resume()
2427 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); in dm_integrity_resume()
2428 if (recalc_pos < ic->provided_data_sectors) { in dm_integrity_resume()
2429 queue_work(ic->recalc_wq, &ic->recalc_work); in dm_integrity_resume()
2430 } else if (recalc_pos > ic->provided_data_sectors) { in dm_integrity_resume()
2431 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors); in dm_integrity_resume()
2432 recalc_write_super(ic); in dm_integrity_resume()
2440 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; in dm_integrity_status() local
2447 (unsigned long long)atomic64_read(&ic->number_of_mismatches), in dm_integrity_status()
2448 (unsigned long long)ic->provided_data_sectors); in dm_integrity_status()
2449 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in dm_integrity_status()
2450 DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector)); in dm_integrity_status()
2456 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; in dm_integrity_status()
2457 watermark_percentage += ic->journal_entries / 2; in dm_integrity_status()
2458 do_div(watermark_percentage, ic->journal_entries); in dm_integrity_status()
2460 arg_count += !!ic->meta_dev; in dm_integrity_status()
2461 arg_count += ic->sectors_per_block != 1; in dm_integrity_status()
2462 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); in dm_integrity_status()
2463 arg_count += !!ic->internal_hash_alg.alg_string; in dm_integrity_status()
2464 arg_count += !!ic->journal_crypt_alg.alg_string; in dm_integrity_status()
2465 arg_count += !!ic->journal_mac_alg.alg_string; in dm_integrity_status()
2466 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, in dm_integrity_status()
2467 ic->tag_size, ic->mode, arg_count); in dm_integrity_status()
2468 if (ic->meta_dev) in dm_integrity_status()
2469 DMEMIT(" meta_device:%s", ic->meta_dev->name); in dm_integrity_status()
2470 if (ic->sectors_per_block != 1) in dm_integrity_status()
2471 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_status()
2472 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in dm_integrity_status()
2474 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); in dm_integrity_status()
2475 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); in dm_integrity_status()
2476 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); in dm_integrity_status()
2478 DMEMIT(" commit_time:%u", ic->autocommit_msec); in dm_integrity_status()
2482 if (ic->a.alg_string) { \ in dm_integrity_status()
2483 DMEMIT(" %s:%s", n, ic->a.alg_string); \ in dm_integrity_status()
2484 if (ic->a.key_string) \ in dm_integrity_status()
2485 DMEMIT(":%s", ic->a.key_string);\ in dm_integrity_status()
2499 struct dm_integrity_c *ic = ti->private; in dm_integrity_iterate_devices() local
2501 if (!ic->meta_dev) in dm_integrity_iterate_devices()
2502 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); in dm_integrity_iterate_devices()
2504 return fn(ti, ic->dev, 0, ti->len, data); in dm_integrity_iterate_devices()
2509 struct dm_integrity_c *ic = ti->private; in dm_integrity_io_hints() local
2511 if (ic->sectors_per_block > 1) { in dm_integrity_io_hints()
2512 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
2513 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
2514 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_io_hints()
2518 static void calculate_journal_section_size(struct dm_integrity_c *ic) in calculate_journal_section_size() argument
2522 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); in calculate_journal_section_size()
2523 …ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block])… in calculate_journal_section_size()
2526 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) in calculate_journal_section_size()
2528 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; in calculate_journal_section_size()
2529 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; in calculate_journal_section_size()
2530 …ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JO… in calculate_journal_section_size()
2531 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; in calculate_journal_section_size()
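A worked instance of the arithmetic above, assuming 512-byte data blocks (log2_sectors_per_block = 0), no journal MAC, the driver's 8-byte commit_id per journal sector, and a 16-byte journal_entry:

enum {
        SECTOR_DATA_BYTES  = 512 - 8,                          /* after the commit_id */
        ENTRY_BYTES        = 16,                               /* per 512-byte block */
        ENTRIES_PER_SECTOR = SECTOR_DATA_BYTES / ENTRY_BYTES,  /* 31 */
        SECTION_ENTRIES    = ENTRIES_PER_SECTOR * 8,           /* 8 entry sectors: 248 */
        SECTION_SECTORS    = SECTION_ENTRIES + 8,              /* 248 data + 8 entry: 256 */
};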
2534 static int calculate_device_limits(struct dm_integrity_c *ic) in calculate_device_limits() argument
2538 calculate_journal_section_size(ic); in calculate_device_limits()
2539 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; in calculate_device_limits()
2540 …if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UIN… in calculate_device_limits()
2542 ic->initial_sectors = initial_sectors; in calculate_device_limits()
2544 if (!ic->meta_dev) { in calculate_device_limits()
2547 …ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_… in calculate_device_limits()
2549 if (!(ic->metadata_run & (ic->metadata_run - 1))) in calculate_device_limits()
2550 ic->log2_metadata_run = __ffs(ic->metadata_run); in calculate_device_limits()
2552 ic->log2_metadata_run = -1; in calculate_device_limits()
2554 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); in calculate_device_limits()
2555 last_sector = get_data_sector(ic, last_area, last_offset); in calculate_device_limits()
2556 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) in calculate_device_limits()
2559 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; in calculate_device_limits()
2560 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) in calculate_device_limits()
2561 >> (ic->log2_buffer_sectors + SECTOR_SHIFT); in calculate_device_limits()
2562 meta_size <<= ic->log2_buffer_sectors; in calculate_device_limits()
2563 if (ic->initial_sectors + meta_size < ic->initial_sectors || in calculate_device_limits()
2564 ic->initial_sectors + meta_size > ic->meta_device_sectors) in calculate_device_limits()
2566 ic->metadata_run = 1; in calculate_device_limits()
2567 ic->log2_metadata_run = 0; in calculate_device_limits()
2573 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned inte… in initialize_superblock() argument
2578 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); in initialize_superblock()
2579 memcpy(ic->sb->magic, SB_MAGIC, 8); in initialize_superblock()
2580 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); in initialize_superblock()
2581 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); in initialize_superblock()
2582 if (ic->journal_mac_alg.alg_string) in initialize_superblock()
2583 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); in initialize_superblock()
2585 calculate_journal_section_size(ic); in initialize_superblock()
2586 journal_sections = journal_sectors / ic->journal_section_sectors; in initialize_superblock()
2590 if (!ic->meta_dev) { in initialize_superblock()
2591 ic->sb->journal_sections = cpu_to_le32(journal_sections); in initialize_superblock()
2594 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); in initialize_superblock()
2595 …ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_s… in initialize_superblock()
2596 …ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_s… in initialize_superblock()
2598 ic->provided_data_sectors = 0; in initialize_superblock()
2599 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) { in initialize_superblock()
2600 __u64 prev_data_sectors = ic->provided_data_sectors; in initialize_superblock()
2602 ic->provided_data_sectors |= (sector_t)1 << test_bit; in initialize_superblock()
2603 if (calculate_device_limits(ic)) in initialize_superblock()
2604 ic->provided_data_sectors = prev_data_sectors; in initialize_superblock()
2606 if (!ic->provided_data_sectors) in initialize_superblock()
2609 ic->sb->log2_interleave_sectors = 0; in initialize_superblock()
2610 ic->provided_data_sectors = ic->data_device_sectors; in initialize_superblock()
2611 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); in initialize_superblock()
2614 ic->sb->journal_sections = cpu_to_le32(0); in initialize_superblock()
2616 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections); in initialize_superblock()
2620 ic->sb->journal_sections = cpu_to_le32(test_journal_sections); in initialize_superblock()
2621 if (calculate_device_limits(ic)) in initialize_superblock()
2622 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections); in initialize_superblock()
2625 if (!le32_to_cpu(ic->sb->journal_sections)) { in initialize_superblock()
2626 if (ic->log2_buffer_sectors > 3) { in initialize_superblock()
2627 ic->log2_buffer_sectors--; in initialize_superblock()
2634 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); in initialize_superblock()
2636 sb_set_version(ic); in initialize_superblock()
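A sketch of the capacity probe in the interleaved case above: provided_data_sectors is grown bit by bit from the top (top_bit mirrors fls64(meta_device_sectors) - 1), keeping each bit only if the layout still fits; fits() is a hypothetical stand-in for calculate_device_limits() succeeding:

#include <stdbool.h>
#include <stdint.h>

static uint64_t probe_data_sectors(int top_bit,
                                   bool (*fits)(uint64_t data_sectors))
{
        uint64_t size = 0;
        int bit;

        for (bit = top_bit; bit >= 3; bit--) {
                uint64_t candidate = size | (UINT64_C(1) << bit);

                if (fits(candidate))
                        size = candidate;
        }
        return size;
}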
2641 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) in dm_integrity_set() argument
2648 bi.tuple_size = ic->tag_size; in dm_integrity_set()
2650 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; in dm_integrity_set()
2656 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) in dm_integrity_free_page_list() argument
2662 for (i = 0; i < ic->journal_pages; i++) in dm_integrity_free_page_list()
2668 static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic) in dm_integrity_alloc_page_list() argument
2670 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list); in dm_integrity_alloc_page_list()
2678 for (i = 0; i < ic->journal_pages; i++) { in dm_integrity_alloc_page_list()
2681 dm_integrity_free_page_list(ic, pl); in dm_integrity_alloc_page_list()
2691 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **s… in dm_integrity_free_journal_scatterlist() argument
2694 for (i = 0; i < ic->journal_sections; i++) in dm_integrity_free_journal_scatterlist()
2699 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struc… in dm_integrity_alloc_journal_scatterlist() argument
2704 sl = kvmalloc_array(ic->journal_sections, in dm_integrity_alloc_journal_scatterlist()
2710 for (i = 0; i < ic->journal_sections; i++) { in dm_integrity_alloc_journal_scatterlist()
2717 page_list_location(ic, i, 0, &start_index, &start_offset); in dm_integrity_alloc_journal_scatterlist()
2718 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset); in dm_integrity_alloc_journal_scatterlist()
2725 dm_integrity_free_journal_scatterlist(ic, sl); in dm_integrity_alloc_journal_scatterlist()
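Each journal section's scatterlist is built over the page-list range that page_list_location() returns for the section's first and last sectors. A sketch of the underlying sector-to-page arithmetic, assuming 512-byte sectors and 4 KiB pages (an illustration, not the driver's helper):

#define SECTOR_SHIFT_ 9		/* 512-byte sectors, as in the driver */
#define PAGE_SHIFT_   12	/* assume 4 KiB pages for the example */

static void sector_to_page(unsigned long journal_sector,
			   unsigned int *page_index,
			   unsigned int *page_offset)
{
	unsigned long byte = journal_sector << SECTOR_SHIFT_;

	*page_index = byte >> PAGE_SHIFT_;		/* which journal page */
	*page_offset = byte & ((1UL << PAGE_SHIFT_) - 1); /* offset within it */
}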
2816 static int create_journal(struct dm_integrity_c *ic, char **error) in create_journal() argument
2824 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); in create_journal()
2825 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); in create_journal()
2826 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); in create_journal()
2827 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); in create_journal()
2829 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, in create_journal()
2837 ic->journal_pages = journal_pages; in create_journal()
2839 ic->journal = dm_integrity_alloc_page_list(ic); in create_journal()
2840 if (!ic->journal) { in create_journal()
2845 if (ic->journal_crypt_alg.alg_string) { in create_journal()
2849 comp.ic = ic; in create_journal()
2850 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0); in create_journal()
2851 if (IS_ERR(ic->journal_crypt)) { in create_journal()
2853 r = PTR_ERR(ic->journal_crypt); in create_journal()
2854 ic->journal_crypt = NULL; in create_journal()
2857 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); in create_journal()
2858 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); in create_journal()
2860 if (ic->journal_crypt_alg.key) { in create_journal()
2861 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, in create_journal()
2862 ic->journal_crypt_alg.key_size); in create_journal()
2869 ic->journal_crypt_alg.alg_string, blocksize, ivsize); in create_journal()
2871 ic->journal_io = dm_integrity_alloc_page_list(ic); in create_journal()
2872 if (!ic->journal_io) { in create_journal()
2881 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
2895 ic->journal_xor = dm_integrity_alloc_page_list(ic); in create_journal()
2896 if (!ic->journal_xor) { in create_journal()
2902 sg = kvmalloc_array(ic->journal_pages + 1, in create_journal()
2910 sg_init_table(sg, ic->journal_pages + 1); in create_journal()
2911 for (i = 0; i < ic->journal_pages; i++) { in create_journal()
2912 char *va = lowmem_page_address(ic->journal_xor[i].page); in create_journal()
2916 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); in create_journal()
2919 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv); in create_journal()
2925 r = dm_integrity_failed(ic); in create_journal()
2930 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); in create_journal()
2932 crypto_free_skcipher(ic->journal_crypt); in create_journal()
2933 ic->journal_crypt = NULL; in create_journal()
2937 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
2958 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); in create_journal()
2959 if (!ic->journal_scatterlist) { in create_journal()
2964 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); in create_journal()
2965 if (!ic->journal_io_scatterlist) { in create_journal()
2970 ic->sk_requests = kvmalloc_array(ic->journal_sections, in create_journal()
2973 if (!ic->sk_requests) { in create_journal()
2978 for (i = 0; i < ic->journal_sections; i++) { in create_journal()
2994 r = dm_integrity_failed(ic); in create_journal()
3000 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
3015 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; in create_journal()
3016 ic->sk_requests[i] = section_req; in create_journal()
3026 if (ic->commit_ids[j] == ic->commit_ids[i]) { in create_journal()
3027 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); in create_journal()
3031 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); in create_journal()
3034 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); in create_journal()
3040 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); in create_journal()
3041 if (!ic->journal_tree) { in create_journal()
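The loop near the end of create_journal() keeps the four (post-encryption) commit IDs pairwise distinct, since equal IDs would make journal wrap-around detection ambiguous: any duplicate is incremented and rechecked from the start. A user-space sketch of that pass (endianness conversions omitted; the driver keeps the IDs little-endian and re-wraps them with cpu_to_le64()):

#include <stdint.h>

#define N_COMMIT_IDS 4	/* as in the driver */

static void dedup_commit_ids(uint64_t id[N_COMMIT_IDS])
{
	unsigned int i, j;

	for (i = 0; i < N_COMMIT_IDS; i++) {
retest:
		for (j = 0; j < i; j++) {
			if (id[j] == id[i]) {
				id[i]++;	/* bump the duplicate... */
				goto retest;	/* ...and recheck it against all earlier IDs */
			}
		}
	}
}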
3075 struct dm_integrity_c *ic; in dm_integrity_ctr() local
3096 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); in dm_integrity_ctr()
3097 if (!ic) { in dm_integrity_ctr()
3101 ti->private = ic; in dm_integrity_ctr()
3103 ic->ti = ti; in dm_integrity_ctr()
3105 ic->in_progress = RB_ROOT; in dm_integrity_ctr()
3106 INIT_LIST_HEAD(&ic->wait_list); in dm_integrity_ctr()
3107 init_waitqueue_head(&ic->endio_wait); in dm_integrity_ctr()
3108 bio_list_init(&ic->flush_bio_list); in dm_integrity_ctr()
3109 init_waitqueue_head(&ic->copy_to_journal_wait); in dm_integrity_ctr()
3110 init_completion(&ic->crypto_backoff); in dm_integrity_ctr()
3111 atomic64_set(&ic->number_of_mismatches, 0); in dm_integrity_ctr()
3113 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); in dm_integrity_ctr()
3124 ic->start = start; in dm_integrity_ctr()
3127 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { in dm_integrity_ctr()
3135 ic->mode = argv[3][0]; in dm_integrity_ctr()
3148 ic->sectors_per_block = 1; in dm_integrity_ctr()
3176 if (ic->meta_dev) { in dm_integrity_ctr()
3177 dm_put_device(ti, ic->meta_dev); in dm_integrity_ctr()
3178 ic->meta_dev = NULL; in dm_integrity_ctr()
3180 r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev); in dm_integrity_ctr()
3193 ic->sectors_per_block = val >> SECTOR_SHIFT; in dm_integrity_ctr()
3195 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, in dm_integrity_ctr()
3200 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, in dm_integrity_ctr()
3205 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, in dm_integrity_ctr()
3218 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; in dm_integrity_ctr()
3219 if (!ic->meta_dev) in dm_integrity_ctr()
3220 ic->meta_device_sectors = ic->data_device_sectors; in dm_integrity_ctr()
3222 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT; in dm_integrity_ctr()
3226 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); in dm_integrity_ctr()
3231 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); in dm_integrity_ctr()
3233 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, in dm_integrity_ctr()
3238 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, in dm_integrity_ctr()
3243 if (!ic->tag_size) { in dm_integrity_ctr()
3244 if (!ic->internal_hash) { in dm_integrity_ctr()
3249 ic->tag_size = crypto_shash_digestsize(ic->internal_hash); in dm_integrity_ctr()
3251 if (ic->tag_size > MAX_TAG_SIZE) { in dm_integrity_ctr()
3256 if (!(ic->tag_size & (ic->tag_size - 1))) in dm_integrity_ctr()
3257 ic->log2_tag_size = __ffs(ic->tag_size); in dm_integrity_ctr()
3259 ic->log2_tag_size = -1; in dm_integrity_ctr()
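A tag size that happens to be a power of two lets the later per-block offset math use shifts instead of multiplies; log2_tag_size caches __ffs(tag_size) for that fast path, or -1 to force the generic multiply. A sketch, using __builtin_ctz as a user-space analogue of __ffs:

#include <stdint.h>

static int is_pow2(unsigned int x)
{
	return x && !(x & (x - 1));	/* the same test as on line 3256 above */
}

/* log2_tag_size would be: is_pow2(tag_size) ? __builtin_ctz(tag_size) : -1 */
static uint64_t tag_byte_offset(uint64_t block, unsigned int tag_size,
				int log2_tag_size)
{
	if (log2_tag_size >= 0)
		return block << log2_tag_size;	/* shift fast path */
	return block * tag_size;		/* generic fallback */
}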
3261 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); in dm_integrity_ctr()
3262 ic->autocommit_msec = sync_msec; in dm_integrity_ctr()
3263 timer_setup(&ic->autocommit_timer, autocommit_fn, 0); in dm_integrity_ctr()
3265 ic->io = dm_io_client_create(); in dm_integrity_ctr()
3266 if (IS_ERR(ic->io)) { in dm_integrity_ctr()
3267 r = PTR_ERR(ic->io); in dm_integrity_ctr()
3268 ic->io = NULL; in dm_integrity_ctr()
3273 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache); in dm_integrity_ctr()
3279 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", in dm_integrity_ctr()
3281 if (!ic->metadata_wq) { in dm_integrity_ctr()
3291 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); in dm_integrity_ctr()
3292 if (!ic->wait_wq) { in dm_integrity_ctr()
3298 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, in dm_integrity_ctr()
3300 if (!ic->offload_wq) { in dm_integrity_ctr()
3306 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
3307 if (!ic->commit_wq) { in dm_integrity_ctr()
3312 INIT_WORK(&ic->commit_work, integrity_commit); in dm_integrity_ctr()
3314 if (ic->mode == 'J') { in dm_integrity_ctr()
3315 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
3316 if (!ic->writer_wq) { in dm_integrity_ctr()
3321 INIT_WORK(&ic->writer_work, integrity_writer); in dm_integrity_ctr()
3324 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); in dm_integrity_ctr()
3325 if (!ic->sb) { in dm_integrity_ctr()
3331 r = sync_rw_sb(ic, REQ_OP_READ, 0); in dm_integrity_ctr()
3337 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { in dm_integrity_ctr()
3338 if (ic->mode != 'R') { in dm_integrity_ctr()
3339 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { in dm_integrity_ctr()
3346 r = initialize_superblock(ic, journal_sectors, interleave_sectors); in dm_integrity_ctr()
3351 if (ic->mode != 'R') in dm_integrity_ctr()
3355 if (!ic->sb->version || ic->sb->version > SB_VERSION_2) { in dm_integrity_ctr()
3360 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { in dm_integrity_ctr()
3365 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { in dm_integrity_ctr()
3370 if (!le32_to_cpu(ic->sb->journal_sections)) { in dm_integrity_ctr()
3376 if (!ic->meta_dev) { in dm_integrity_ctr()
3377 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || in dm_integrity_ctr()
3378 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { in dm_integrity_ctr()
3384 if (ic->sb->log2_interleave_sectors) { in dm_integrity_ctr()
3390 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); in dm_integrity_ctr()
3391 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) { in dm_integrity_ctr()
3397 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) { in dm_integrity_ctr()
3404 r = calculate_device_limits(ic); in dm_integrity_ctr()
3406 if (ic->meta_dev) { in dm_integrity_ctr()
3407 if (ic->log2_buffer_sectors > 3) { in dm_integrity_ctr()
3408 ic->log2_buffer_sectors--; in dm_integrity_ctr()
3415 if (!ic->meta_dev) in dm_integrity_ctr()
3416 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); in dm_integrity_ctr()
3418 if (ti->len > ic->provided_data_sectors) { in dm_integrity_ctr()
3425 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); in dm_integrity_ctr()
3428 ic->free_sectors_threshold = threshold; in dm_integrity_ctr()
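threshold here implements the journal watermark: a commit is kicked once free journal entries fall below (100 - journal_watermark) percent of the total. A sketch of the arithmetic (the rounding and the division by 100 sit on source lines not shown in this listing):

#include <stdint.h>

static uint64_t free_sectors_threshold(uint64_t journal_entries,
				       unsigned int journal_watermark)
{
	/* journal_watermark is a percentage of entries allowed in use */
	return journal_entries * (100 - journal_watermark) / 100;
}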
3431 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); in dm_integrity_ctr()
3432 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); in dm_integrity_ctr()
3433 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); in dm_integrity_ctr()
3434 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); in dm_integrity_ctr()
3435 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); in dm_integrity_ctr()
3436 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); in dm_integrity_ctr()
3437 DEBUG_print(" journal_entries %u\n", ic->journal_entries); in dm_integrity_ctr()
3438 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); in dm_integrity_ctr()
3439 DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors); in dm_integrity_ctr()
3440 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); in dm_integrity_ctr()
3441 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); in dm_integrity_ctr()
3442 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); in dm_integrity_ctr()
3443 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors, in dm_integrity_ctr()
3444 (unsigned long long)ic->provided_data_sectors); in dm_integrity_ctr()
3445 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); in dm_integrity_ctr()
3447 if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { in dm_integrity_ctr()
3448 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_ctr()
3449 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_ctr()
3452 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in dm_integrity_ctr()
3453 if (!ic->internal_hash) { in dm_integrity_ctr()
3458 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
3459 if (!ic->recalc_wq) { in dm_integrity_ctr()
3464 INIT_WORK(&ic->recalc_work, integrity_recalc); in dm_integrity_ctr()
3465 ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT); in dm_integrity_ctr()
3466 if (!ic->recalc_buffer) { in dm_integrity_ctr()
3471 ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, in dm_integrity_ctr()
3472 ic->tag_size, GFP_KERNEL); in dm_integrity_ctr()
3473 if (!ic->recalc_tags) { in dm_integrity_ctr()
3480 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, in dm_integrity_ctr()
3481 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL); in dm_integrity_ctr()
3482 if (IS_ERR(ic->bufio)) { in dm_integrity_ctr()
3483 r = PTR_ERR(ic->bufio); in dm_integrity_ctr()
3485 ic->bufio = NULL; in dm_integrity_ctr()
3488 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); in dm_integrity_ctr()
3490 if (ic->mode != 'R') { in dm_integrity_ctr()
3491 r = create_journal(ic, &ti->error); in dm_integrity_ctr()
3499 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_ctr()
3500 r = dm_integrity_failed(ic); in dm_integrity_ctr()
3505 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); in dm_integrity_ctr()
3510 ic->just_formatted = true; in dm_integrity_ctr()
3513 if (!ic->meta_dev) { in dm_integrity_ctr()
3514 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); in dm_integrity_ctr()
3519 if (!ic->internal_hash) in dm_integrity_ctr()
3520 dm_integrity_set(ti, ic); in dm_integrity_ctr()
3533 struct dm_integrity_c *ic = ti->private; in dm_integrity_dtr() local
3535 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); in dm_integrity_dtr()
3536 BUG_ON(!list_empty(&ic->wait_list)); in dm_integrity_dtr()
3538 if (ic->metadata_wq) in dm_integrity_dtr()
3539 destroy_workqueue(ic->metadata_wq); in dm_integrity_dtr()
3540 if (ic->wait_wq) in dm_integrity_dtr()
3541 destroy_workqueue(ic->wait_wq); in dm_integrity_dtr()
3542 if (ic->offload_wq) in dm_integrity_dtr()
3543 destroy_workqueue(ic->offload_wq); in dm_integrity_dtr()
3544 if (ic->commit_wq) in dm_integrity_dtr()
3545 destroy_workqueue(ic->commit_wq); in dm_integrity_dtr()
3546 if (ic->writer_wq) in dm_integrity_dtr()
3547 destroy_workqueue(ic->writer_wq); in dm_integrity_dtr()
3548 if (ic->recalc_wq) in dm_integrity_dtr()
3549 destroy_workqueue(ic->recalc_wq); in dm_integrity_dtr()
3550 if (ic->recalc_buffer) in dm_integrity_dtr()
3551 vfree(ic->recalc_buffer); in dm_integrity_dtr()
3552 if (ic->recalc_tags) in dm_integrity_dtr()
3553 kvfree(ic->recalc_tags); in dm_integrity_dtr()
3554 if (ic->bufio) in dm_integrity_dtr()
3555 dm_bufio_client_destroy(ic->bufio); in dm_integrity_dtr()
3556 mempool_exit(&ic->journal_io_mempool); in dm_integrity_dtr()
3557 if (ic->io) in dm_integrity_dtr()
3558 dm_io_client_destroy(ic->io); in dm_integrity_dtr()
3559 if (ic->dev) in dm_integrity_dtr()
3560 dm_put_device(ti, ic->dev); in dm_integrity_dtr()
3561 if (ic->meta_dev) in dm_integrity_dtr()
3562 dm_put_device(ti, ic->meta_dev); in dm_integrity_dtr()
3563 dm_integrity_free_page_list(ic, ic->journal); in dm_integrity_dtr()
3564 dm_integrity_free_page_list(ic, ic->journal_io); in dm_integrity_dtr()
3565 dm_integrity_free_page_list(ic, ic->journal_xor); in dm_integrity_dtr()
3566 if (ic->journal_scatterlist) in dm_integrity_dtr()
3567 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); in dm_integrity_dtr()
3568 if (ic->journal_io_scatterlist) in dm_integrity_dtr()
3569 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); in dm_integrity_dtr()
3570 if (ic->sk_requests) { in dm_integrity_dtr()
3573 for (i = 0; i < ic->journal_sections; i++) { in dm_integrity_dtr()
3574 struct skcipher_request *req = ic->sk_requests[i]; in dm_integrity_dtr()
3580 kvfree(ic->sk_requests); in dm_integrity_dtr()
3582 kvfree(ic->journal_tree); in dm_integrity_dtr()
3583 if (ic->sb) in dm_integrity_dtr()
3584 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); in dm_integrity_dtr()
3586 if (ic->internal_hash) in dm_integrity_dtr()
3587 crypto_free_shash(ic->internal_hash); in dm_integrity_dtr()
3588 free_alg(&ic->internal_hash_alg); in dm_integrity_dtr()
3590 if (ic->journal_crypt) in dm_integrity_dtr()
3591 crypto_free_skcipher(ic->journal_crypt); in dm_integrity_dtr()
3592 free_alg(&ic->journal_crypt_alg); in dm_integrity_dtr()
3594 if (ic->journal_mac) in dm_integrity_dtr()
3595 crypto_free_shash(ic->journal_mac); in dm_integrity_dtr()
3596 free_alg(&ic->journal_mac_alg); in dm_integrity_dtr()
3598 kfree(ic); in dm_integrity_dtr()
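Every teardown in the destructor is guarded by an if (ic->x) test because the constructor's error paths call it on a partially built context. A minimal user-space sketch of that contract (the struct and helpers are invented for illustration):

#include <stdlib.h>

struct ctx {
	char *journal;
	char *tree;
};

static void ctx_destroy(struct ctx *c)
{
	/* safe on a zero-initialized, half-constructed context */
	if (c->journal)
		free(c->journal);
	if (c->tree)
		free(c->tree);
	free(c);
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));	/* like kzalloc() in the ctr */
	if (!c)
		return 1;
	c->journal = malloc(4096);	/* suppose the tree allocation then failed */
	ctx_destroy(c);			/* destructor still runs cleanly */
	return 0;
}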