/drivers/block/ |
D | brd.c |
    28    #define SECTOR_SHIFT 9    macro
    29    #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
    201   unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;    in copy_to_brd_setup()
    208   sector += copy >> SECTOR_SHIFT;    in copy_to_brd_setup()
    228   sector += PAGE_SIZE >> SECTOR_SHIFT;    in discard_from_brd()
    241   unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;    in copy_to_brd()
    254   sector += copy >> SECTOR_SHIFT;    in copy_to_brd()
    273   unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;    in copy_from_brd()
    287   sector += copy >> SECTOR_SHIFT;    in copy_from_brd()
    342   if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||    in brd_make_request()
    [all …]
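All of the brd.c references above are one pattern: a 512-byte sector number is split into a backing-page index and a byte offset within that page. A minimal standalone sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12); brd's actual radix-tree page lookup is left out:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT        9                      /* 512-byte sectors */
    #define PAGE_SHIFT          12                     /* assumption: 4 KiB pages */
    #define PAGE_SECTORS_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)
    #define PAGE_SECTORS        (1UL << PAGE_SECTORS_SHIFT)

    int main(void)
    {
        uint64_t sector = 1234;                        /* arbitrary example sector */

        /* which backing page holds this sector, and where inside it */
        uint64_t page_index = sector >> PAGE_SECTORS_SHIFT;
        uint64_t offset     = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;

        printf("sector %llu -> page %llu, byte offset %llu\n",
               (unsigned long long)sector,
               (unsigned long long)page_index,
               (unsigned long long)offset);
        return 0;
    }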
|
/drivers/block/zram/ |
D | zram_drv.h |
    40    #define SECTOR_SHIFT 9    macro
    41    #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
    46    (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
|
D | zram_drv.c |
    137   end = start + (size >> SECTOR_SHIFT);    in valid_io_request()
    138   bound = zram->disksize >> SECTOR_SHIFT;    in valid_io_request()
    861   generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,    in zram_bvec_rw()
    893   (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;    in __zram_make_request()
    1001  offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;    in zram_rw_page()
    1105  set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);    in disksize_store()
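The valid_io_request() hits at lines 137-138 boil down to a bounds check done entirely in sector units. A hedged sketch of just that check (the real function also verifies alignment against the zram logical block size, which is omitted here):

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* Does the request [start, start + size) stay within the disk? */
    static bool io_within_disksize(uint64_t start_sector, uint64_t size_bytes,
                                   uint64_t disksize_bytes)
    {
        uint64_t end   = start_sector + (size_bytes >> SECTOR_SHIFT);
        uint64_t bound = disksize_bytes >> SECTOR_SHIFT;

        /* reject requests that start or end past the last sector, or wrap */
        return start_sector < bound && end <= bound && start_sector <= end;
    }

    int main(void)
    {
        /* a 1 MiB request at sector 0 of an 8 MiB disk is in range */
        return io_within_disksize(0, 1 << 20, 8 << 20) ? 0 : 1;
    }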
|
/drivers/mtd/ |
D | ssfdc.c |
    39    #define SECTOR_SHIFT 9    macro
    136   cis_sector = (int)(offset >> SECTOR_SHIFT);    in get_valid_cis_sector()
    157   loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;    in read_physical_sector()
    311   ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);    in ssfdcr_add_mtd()
    323   ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /    in ssfdcr_add_mtd()
    375   sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;    in ssfdcr_readsect()
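Line 323 (truncated above) looks like the usual CHS geometry calculation: total 512-byte sectors, presumably divided by heads times sectors-per-track, gives cylinders. A sketch with made-up geometry values; the real driver derives them from the MTD device:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
        uint32_t size_bytes        = 64u * 1024 * 1024;  /* e.g. a 64 MiB card */
        uint16_t heads             = 16;                 /* illustrative values only */
        uint16_t sectors_per_track = 32;

        uint16_t cylinders = (uint16_t)((size_bytes >> SECTOR_SHIFT) /
                                        ((uint32_t)heads * sectors_per_track));

        printf("%u cylinders x %u heads x %u sectors\n",
               cylinders, heads, sectors_per_track);
        return 0;
    }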
|
/drivers/md/ |
D | dm-android-verity.c |
    303   BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0);    in extract_fec_header()
    306   FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE);    in extract_fec_header()
    363   *device_size >>= SECTOR_SHIFT;    in find_size()
    424   BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0);    in extract_metadata()
    427   (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE);    in extract_metadata()
    812   SECTOR_SHIFT)) {    in android_verity_ctr()
    819   data_sectors *= data_block_size >> SECTOR_SHIFT;    in android_verity_ctr()
|
D | dm-verity-target.c |
    594   ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {    in verity_map()
    600   (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {    in verity_map()
    611   io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);    in verity_map()
    704   ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)    in verity_prepare_ioctl()
    964   (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))    in verity_ctr()
    965   >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {    in verity_ctr()
    972   if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {    in verity_ctr()
    979   (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))    in verity_ctr()
    980   >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {    in verity_ctr()
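The pairs at lines 964-965 and 979-980 (repeated in dm-verity-fec.c below) are an overflow guard: shift the block count up to sectors, shift it back down, and reject the argument if it no longer round-trips. A standalone sketch of the idiom, assuming a 64-bit sector_t:

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    typedef uint64_t sector_t;   /* assumption: 64-bit sector_t */

    /* Convert blocks to sectors, refusing counts whose shift overflows sector_t. */
    static bool blocks_to_sectors(unsigned long long num_blocks,
                                  unsigned int block_bits, sector_t *out)
    {
        unsigned int shift = block_bits - SECTOR_SHIFT;

        if ((sector_t)(num_blocks << shift) >> shift != num_blocks)
            return false;                    /* overflow: round trip failed */

        *out = (sector_t)num_blocks << shift;
        return true;
    }

    int main(void)
    {
        sector_t sectors;
        /* 1000 blocks of 4096 bytes -> 8000 sectors */
        return blocks_to_sectors(1000, 12, &sectors) && sectors == 8000 ? 0 : 1;
    }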
|
D | dm-snap-persistent.c |
    172   len = ps->store->chunk_size << SECTOR_SHIFT;    in alloc_area()
    301   memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);    in zero_memory_area()
    392   memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);    in write_header()
    503   ps->store->chunk_size << SECTOR_SHIFT,    in read_exceptions()
    548   memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);    in read_exceptions()
    629   ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /    in persistent_read_metadata()
|
D | dm-log-writes.c |
    301   sector += block->vecs[i].bv_len >> SECTOR_SHIFT;    in log_one_block()
    334   return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;    in logdev_last_sector()
    438   lc->sectorsize = 1 << SECTOR_SHIFT;    in log_writes_ctr()
    726   if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)    in log_writes_prepare_ioctl()
    769   limits->discard_granularity = 1 << SECTOR_SHIFT;    in log_writes_io_hints()
    770   limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);    in log_writes_io_hints()
|
D | dm-io.c |
    322   dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));    in do_region()
    333   bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;    in do_region()
    342   bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;    in do_region()
    519   r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);    in dm_io()
|
D | dm-bufio.c |
    162   static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
    163   static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
    573   .count = b->c->block_size >> SECTOR_SHIFT,    in use_dmio()
    934   (c->sectors_per_block_bits + SECTOR_SHIFT);    in __get_memory_limit()
    1449  (SECTOR_SHIFT + c->sectors_per_block_bits);    in dm_bufio_get_device_size()
    1548  return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);    in get_retain_buffers()
    1621  BUG_ON(block_size < 1 << SECTOR_SHIFT ||    in dm_bufio_client_create()
    1633  c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;    in dm_bufio_client_create()
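Lines 1621 and 1633 show how dm-bufio requires a power-of-two block size of at least one sector and then stores it as a shift count. A sketch of the same derivation, with __builtin_ctz standing in for the kernel's __ffs() (equivalent for non-zero power-of-two inputs):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
        unsigned int block_size = 4096;   /* must be a power of two, >= 512 */

        /* mirrors the style of the BUG_ON() check at line 1621 */
        assert(block_size >= (1u << SECTOR_SHIFT) &&
               (block_size & (block_size - 1)) == 0);

        /* line 1633: block size expressed as "sectors per block" bits */
        unsigned int sectors_per_block_bits = __builtin_ctz(block_size) - SECTOR_SHIFT;

        /* a size-in-blocks conversion in the spirit of line 1449 */
        uint64_t device_bytes  = 1ull << 30;   /* pretend 1 GiB device */
        uint64_t device_blocks = device_bytes >> (SECTOR_SHIFT + sectors_per_block_bits);

        printf("block_size=%u -> %u extra bits, %llu blocks per GiB\n",
               block_size, sectors_per_block_bits,
               (unsigned long long)device_blocks);
        return 0;
    }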
|
D | dm-verity-fec.c |
    640   ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))    in verity_fec_parse_opt_args()
    641   >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {    in verity_fec_parse_opt_args()
    649   ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >>    in verity_fec_parse_opt_args()
    650   (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {    in verity_fec_parse_opt_args()
|
D | dm-cache-metadata.h |
    27    #define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
|
D | dm-exception-store.h |
    194   return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;    in get_dev_size()
|
D | dm-crypt.c |
    713   for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)    in crypt_iv_tcw_whitening()
    855   sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,    in crypt_convert_block()
    859   sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,    in crypt_convert_block()
    862   bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);    in crypt_convert_block()
    863   bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);    in crypt_convert_block()
    872   1 << SECTOR_SHIFT, iv);    in crypt_convert_block()
    1939  dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));    in crypt_map()
|
D | dm-log.c |
    446   dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,    in create_log_context()
    457   lc->header_location.count = buf_size >> SECTOR_SHIFT;    in create_log_context()
    479   (LOG_OFFSET << SECTOR_SHIFT);    in create_log_context()
|
D | dm-thin-metadata.h |
    24    #define THIN_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
|
D | dm-era-target.c |
    501   disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);    in prepare_superblock()
    1616  (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),    in era_status()
    1665  return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;    in get_dev_size()
    1678  uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;    in era_io_hints()
    1687  blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);    in era_io_hints()
|
D | dm-table.c |
    288   i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;    in device_area_is_invalid()
    290   limits->logical_block_size >> SECTOR_SHIFT;    in device_area_is_invalid()
    457   (unsigned long long) start << SECTOR_SHIFT);    in dm_set_device_limits()
    615   limits->logical_block_size >> SECTOR_SHIFT;    in validate_hardware_logical_block_alignment()
    651   SECTOR_SHIFT) - 1))    in validate_hardware_logical_block_alignment()
|
D | dm-linear.c |
    135   ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)    in dm_linear_prepare_ioctl()
|
D | dm-exception-store.c |
    179   if (chunk_size > INT_MAX >> SECTOR_SHIFT) {    in dm_exception_store_set_chunk_size()
|
D | dm-thin.c |
    44    #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
    45    #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
    660   sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);    in get_bio_block_range()
    1177  (pool->sectors_per_block << SECTOR_SHIFT);    in io_overlaps_block()
    1873  bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;    in process_cell()
    3104  return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;    in get_dev_size()
    3912  sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;    in pool_io_hints()
    3938  blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);    in pool_io_hints()
    3940  blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);    in pool_io_hints()
    3941  blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);    in pool_io_hints()
    [all …]
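The limits at lines 44-45 are straight sector arithmetic: 64 KiB is 128 sectors and 1 GiB is 2,097,152 sectors. A compile-time check of those two values (C11 static_assert):

    #include <assert.h>

    #define SECTOR_SHIFT 9

    /* the thin-pool block size limits from lines 44-45, in 512-byte sectors */
    #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
    #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

    static_assert(DATA_DEV_BLOCK_SIZE_MIN_SECTORS == 128,     "64 KiB = 128 sectors");
    static_assert(DATA_DEV_BLOCK_SIZE_MAX_SECTORS == 2097152, "1 GiB = 2 Mi sectors");

    int main(void) { return 0; }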
|
D | dm-cache-target.c |
    147   #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
    148   #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
    1324  (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));    in bio_writes_complete_block()
    2367  return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;    in get_dev_size()
    3795  limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;    in set_discard_limits()
    3801  uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;    in cache_io_hints()
    3809  blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);    in cache_io_hints()
    3810  blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);    in cache_io_hints()
|
D | dm-flakey.c |
    401   ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)    in flakey_prepare_ioctl()
|
/drivers/nvdimm/ |
D | blk.c |
    147   lba = div_u64(sector << SECTOR_SHIFT, sector_size);    in nsblk_do_bvec()
    166   sector += sector_size >> SECTOR_SHIFT;    in nsblk_do_bvec()
    300   set_capacity(disk, available_disk_size >> SECTOR_SHIFT);    in nsblk_attach_disk()
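Line 147 converts a block-layer sector (always 512-byte units) into an LBA of the namespace's own sector size, which may be larger. The same arithmetic as a standalone sketch, with an assumed 4096-byte LBA format:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
        uint32_t sector_size = 4096;   /* assumption: namespace formatted with 4 KiB LBAs */
        uint64_t sector      = 24;     /* block-layer sector, 512-byte units */

        /* line 147: byte offset divided by the device's LBA size */
        uint64_t lba = (sector << SECTOR_SHIFT) / sector_size;

        /* line 166: advance by one device LBA, expressed in 512-byte sectors */
        sector += sector_size >> SECTOR_SHIFT;

        printf("device LBA %llu; next block-layer sector %llu\n",
               (unsigned long long)lba, (unsigned long long)sector);
        return 0;
    }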
|
/drivers/md/persistent-data/ |
D | dm-space-map-metadata.h |
    12    #define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
|