/drivers/mtd/
  rfd_ftl.c
      53  #define SECTOR_SIZE 512  macro
     140  (i + part->header_sectors_per_block) * SECTOR_SIZE;  in build_block_map()
     158  sectors_per_block = part->block_size / SECTOR_SIZE;  in scan_header()
     167  sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;  in scan_header()
     252  rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,  in rfd_ftl_readsect()
     254  if (!rc && retlen != SECTOR_SIZE)  in rfd_ftl_readsect()
     263  memset(buf, 0, SECTOR_SIZE);  in rfd_ftl_readsect()
     328  sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);  in move_block_contents()
     366  (i + part->header_sectors_per_block) * SECTOR_SIZE;  in move_block_contents()
     376  rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,  in move_block_contents()
     [all …]
  ftl.c
     106  #define SECTOR_SIZE 512  macro
     398  le32_to_cpu(part->header.BAMOffset), SECTOR_SIZE);  in prepare_xfer()
     431  u_char buf[SECTOR_SIZE];  in copy_erase_unit()
     490  ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,  in copy_erase_unit()
     498  ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,  in copy_erase_unit()
     512  src += SECTOR_SIZE;  in copy_erase_unit()
     513  dest += SECTOR_SIZE;  in copy_erase_unit()
     765  if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {  in ftl_read()
     771  memset(buffer, 0, SECTOR_SIZE);  in ftl_read()
     775  ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,  in ftl_read()
     [all …]
  ssfdc.c
      35  #define SECTOR_SIZE 512  macro
     112  sect_buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);  in get_valid_cis_sector()
     123  ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,  in get_valid_cis_sector()
     127  if (ret < 0 || retlen != SECTOR_SIZE) {  in get_valid_cis_sector()
     156  ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);  in read_physical_sector()
     157  if (ret < 0 || retlen != SECTOR_SIZE)  in read_physical_sector()
     400  memset(buf, 0xff, SECTOR_SIZE);  in ssfdcr_readsect()
     430  .blksize = SECTOR_SIZE,
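A pattern shared by the three MTD translation layers above: each defines its own SECTOR_SIZE of 512 and moves data one sector at a time, treating a short read (retlen != SECTOR_SIZE) as an error and zero-filling sectors that lie past the formatted area (ftl.c lines 765 and 771). Below is a minimal userspace sketch of that read path; read_sector, formatted_size, and the plain file descriptor with pread() are hypothetical stand-ins for the kernel's mtd_read()-based code.

#define _XOPEN_SOURCE 700
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define SECTOR_SIZE 512

/*
 * Illustrative stand-in for the FTL read path: sectors beyond the
 * formatted size read back as zeroes; otherwise a full 512-byte read
 * from the backing store is required, and a short read is an error.
 */
static int read_sector(int fd, uint64_t sector, uint64_t formatted_size,
                       unsigned char buf[SECTOR_SIZE])
{
        uint64_t offset = sector * SECTOR_SIZE;

        if (offset >= formatted_size) {
                memset(buf, 0, SECTOR_SIZE);    /* like ftl_read() past FormattedSize */
                return 0;
        }
        if (pread(fd, buf, SECTOR_SIZE, (off_t)offset) != SECTOR_SIZE)
                return -1;                      /* mirrors the retlen != SECTOR_SIZE checks */
        return 0;
}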
/drivers/md/
  dm-default-key.c
     108  if (dkc->sector_size < SECTOR_SIZE ||  in default_key_ctr_optional()
     125  if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) {  in default_key_ctr_optional()
     219  dkc->sector_size = SECTOR_SIZE;  in default_key_ctr()
     334  if (dkc->sector_size != SECTOR_SIZE)  in default_key_status()
     342  if (dkc->sector_size != SECTOR_SIZE) {  in default_key_status()
  dm-bow.c
     105  return (range_top(br) - br->sector) * SECTOR_SIZE;  in range_size()
     110  return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;  in bvec_top()
     198  * SECTOR_SIZE;  in split_range()
     679  if (bc->block_size < SECTOR_SIZE ||  in dm_bow_ctr_optional()
     964  bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;  in bow_write()
     967  * SECTOR_SIZE;  in bow_write()
    1056  bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;  in add_trim()
    1059  * SECTOR_SIZE;  in add_trim()
    1095  bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;  in remove_trim()
    1098  * SECTOR_SIZE;  in remove_trim()
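Both device-mapper targets validate a configurable granularity against the 512-byte baseline: dm-default-key rejects a sector_size below 512 and only accepts a larger one together with the iv_large_sectors option (lines 108 and 125), and dm-bow applies the same lower bound to its block_size (line 679). A small sketch of that check follows; the function and parameter names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SIZE 512

/*
 * Constructor-time validation in the style of the checks above: the
 * caller-supplied size must be at least 512 bytes, and anything larger
 * only passes with an explicit "iv_large_sectors" style opt-in.
 */
static bool sector_size_ok(unsigned int sector_size, bool iv_large_sectors)
{
        if (sector_size < SECTOR_SIZE)
                return false;                   /* below the 512-byte baseline */
        if (sector_size != SECTOR_SIZE && !iv_large_sectors)
                return false;                   /* larger sectors need the opt-in */
        return true;
}

int main(void)
{
        printf("%d\n", sector_size_ok(512, false));   /* 1: default 512-byte sectors */
        printf("%d\n", sector_size_ok(4096, false));  /* 0: larger size without opt-in */
        printf("%d\n", sector_size_ok(4096, true));   /* 1: larger size with opt-in */
        return 0;
}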
/drivers/mtd/nand/raw/
  diskonchip.c
     116  #define SECTOR_SIZE 512  macro
     118  #define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
     195  if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {  in doc_ecc_decode()
     198  if (index < SECTOR_SIZE)  in doc_ecc_decode()
     205  if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {  in doc_ecc_decode()
     208  if (index < SECTOR_SIZE)  in doc_ecc_decode()
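Line 118 sizes the DiskOnChip ECC workload in 10-bit symbols: SECTOR_SIZE + 1 = 513 protected bytes are 4104 bits, and the expression evaluates to (4104 + 6) / 10 = 411 symbols. A quick standalone check of that arithmetic:

#include <stdio.h>

#define SECTOR_SIZE 512
/* Same expression as diskonchip.c line 118. */
#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)

int main(void)
{
        /* 513 bytes -> 4104 bits; the expression yields 411 ten-bit symbols. */
        printf("NB_DATA = %d\n", NB_DATA);
        return 0;
}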
/drivers/infiniband/ulp/iser/
  iscsi_iser.h
     101  ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
     103  #define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
  iser_verbs.c
     533  sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);  in iser_calc_scsi_params()
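All three iser lines convert a sector budget into a count of 4 KiB scatter-gather entries: multiply by the 512-byte sector size, then divide by SZ_4K (a right shift by ilog2(SZ_4K) = 12). Line 103's 32768-sector limit, for example, becomes 32768 * 512 / 4096 = 4096 entries. A standalone sketch of the same arithmetic, with plain round-up division standing in for DIV_ROUND_UP():

#include <stdio.h>

#define SECTOR_SIZE 512
#define SZ_4K 4096

/* Round-up division, like the kernel's DIV_ROUND_UP(). */
static unsigned long sectors_to_4k_pages(unsigned long sectors)
{
        return (sectors * SECTOR_SIZE + SZ_4K - 1) / SZ_4K;
}

int main(void)
{
        /* 32768 sectors (iscsi_iser.h line 103) -> 4096 four-KiB entries. */
        printf("%lu\n", sectors_to_4k_pages(32768));
        /* An odd sector count still needs a whole page for the tail. */
        printf("%lu\n", sectors_to_4k_pages(9));
        return 0;
}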
/drivers/block/
  brd.c
     295  WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||  in brd_submit_bio()
     296  (len & (SECTOR_SIZE - 1)));  in brd_submit_bio()
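The brd warning relies on SECTOR_SIZE being a power of two: masking with (SECTOR_SIZE - 1) yields the value modulo 512, so any non-zero result flags an offset or length that is not sector-aligned. A small illustration of the test:

#include <stdio.h>

#define SECTOR_SIZE 512

/* For power-of-two sizes, masking with (size - 1) extracts x % size. */
static int sector_aligned(unsigned long x)
{
        return (x & (SECTOR_SIZE - 1)) == 0;
}

int main(void)
{
        printf("%d\n", sector_aligned(1024)); /* 1: multiple of 512 */
        printf("%d\n", sector_aligned(520));  /* 0: 520 % 512 == 8  */
        return 0;
}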
  rbd.c
    4941  size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;  in rbd_dev_update_size()
    6290  if (result.uint_32 < SECTOR_SIZE)  in rbd_parse_param()
    6822  set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);  in rbd_dev_device_setup()
/drivers/scsi/
  sd_zbc.c
     213  bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);  in sd_zbc_alloc_report_buffer()
     218  while (bufsize >= SECTOR_SIZE) {  in sd_zbc_alloc_report_buffer()
     225  bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);  in sd_zbc_alloc_report_buffer()
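sd_zbc sizes its zone-report buffer in whole 512-byte sectors: the initial estimate is roundup((nr_zones + 1) * 64, SECTOR_SIZE), and each failed allocation halves the size and rounds it back down to a sector multiple until it drops below one sector. A userspace sketch of that shrink-on-failure loop, with malloc() standing in for the kernel allocator and the helper names chosen here for illustration:

#include <stdio.h>
#include <stdlib.h>

#define SECTOR_SIZE 512

/* roundup()/rounddown() to a multiple of m, like the kernel macros. */
static size_t round_up_to(size_t x, size_t m)   { return ((x + m - 1) / m) * m; }
static size_t round_down_to(size_t x, size_t m) { return (x / m) * m; }

/*
 * Shrink-on-failure allocation in the style of sd_zbc_alloc_report_buffer():
 * start from a sector-aligned estimate and halve (re-aligning down) until an
 * allocation succeeds or the size falls below one sector.
 */
static void *alloc_report_buffer(unsigned int nr_zones, size_t *bufsize)
{
        size_t size = round_up_to((size_t)(nr_zones + 1) * 64, SECTOR_SIZE);
        void *buf = NULL;

        while (size >= SECTOR_SIZE) {
                buf = malloc(size);             /* stand-in for the kernel allocator */
                if (buf)
                        break;
                size = round_down_to(size >> 1, SECTOR_SIZE);
        }
        *bufsize = buf ? size : 0;
        return buf;
}

int main(void)
{
        size_t sz;
        void *buf = alloc_report_buffer(100, &sz);

        /* 101 * 64 = 6464 bytes, rounded up to 6656 (13 sectors). */
        printf("allocated %zu bytes\n", sz);
        free(buf);
        return 0;
}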
  aha1542.c
    1023  SECTOR_SIZE * AHA1542_MAX_SECTORS,  in aha1542_init_cmd_priv()
    1034  dma_free_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS,  in aha1542_exit_cmd_priv()
/drivers/vdpa/vdpa_sim/
  vdpa_sim_blk.c
     239  blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);  in vdpasim_blk_get_config()
/drivers/block/rnbd/
  rnbd-clt.c
      94  dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;  in rnbd_clt_set_dev_attr()
     973  size = dev->size * (dev->logical_block_size / SECTOR_SIZE);  in rnbd_client_getgeo()
    1399  dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)  in rnbd_clt_setup_gen_disk()
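rnbd reports capacity and geometry in 512-byte kernel sectors even when the remote device uses a larger logical block size, so it scales the device's block count by logical_block_size / SECTOR_SIZE (a factor of 8 for 4096-byte blocks); line 94 likewise turns a byte budget into a sector count. A small sketch of the block-count conversion, with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512

/*
 * Convert a size counted in device logical blocks into 512-byte kernel
 * sectors, as the getgeo/setup paths above do. Assumes the logical block
 * size is a multiple of 512.
 */
static uint64_t blocks_to_sectors(uint64_t nblocks, unsigned int logical_block_size)
{
        return nblocks * (logical_block_size / SECTOR_SIZE);
}

int main(void)
{
        /* 1,000,000 blocks of 4 KiB -> 8,000,000 512-byte sectors. */
        printf("%llu\n", (unsigned long long)blocks_to_sectors(1000000, 4096));
        /* With 512-byte logical blocks the count is unchanged. */
        printf("%llu\n", (unsigned long long)blocks_to_sectors(1000000, 512));
        return 0;
}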
/drivers/mmc/core/
  queue.c
     193  q->limits.discard_granularity = SECTOR_SIZE;  in mmc_queue_setup_discard()
/drivers/ufs/core/
  ufshcd.c
    8858  .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
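The ufshcd SCSI host template caps a single request at 1 MiB by expressing the limit in 512-byte sectors: (1 << 20) / 512 = 2048. A one-line check:

#include <stdio.h>

#define SECTOR_SIZE 512

int main(void)
{
        /* 1 MiB expressed in 512-byte sectors, as in the ufshcd host template. */
        printf("max_sectors = %d\n", (1 << 20) / SECTOR_SIZE);  /* prints 2048 */
        return 0;
}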