/drivers/block/drbd/ |
D | drbd_interval.c |
    25   sector_t max = node->sector + (node->size >> 9);  in compute_subtree_last()
    50   sector_t this_end = this->sector + (this->size >> 9);  in drbd_insert_interval()
    61   if (this->sector < here->sector)  in drbd_insert_interval()
    63   else if (this->sector > here->sector)  in drbd_insert_interval()
    90   drbd_contains_interval(struct rb_root *root, sector_t sector,  in drbd_contains_interval() argument
    99   if (sector < here->sector)  in drbd_contains_interval()
    101  else if (sector > here->sector)  in drbd_contains_interval()
    134  drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)  in drbd_find_overlap() argument
    138  sector_t end = sector + (size >> 9);  in drbd_find_overlap()
    147  sector < interval_end(node->rb_left)) {  in drbd_find_overlap()
    [all …]
|
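The drbd_interval entries above keep a request's start in 512-byte sectors and its length in bytes, so the end sector is computed as sector + (size >> 9). A minimal user-space sketch of that arithmetic and the overlap test it enables (types and names are illustrative, not DRBD's; the kernel gets the same answer from an augmented rbtree):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;              /* 512-byte units, as in the kernel */

    struct interval {
        sector_t     sector;                /* start sector    */
        unsigned int size;                  /* length in bytes */
    };

    /* First sector past the interval: size is in bytes, >> 9 converts to sectors. */
    static sector_t interval_end(const struct interval *i)
    {
        return i->sector + (i->size >> 9);
    }

    /* Half-open ranges [start, end) overlap iff each starts before the other ends. */
    static bool overlaps(const struct interval *a, const struct interval *b)
    {
        return a->sector < interval_end(b) && b->sector < interval_end(a);
    }

    int main(void)
    {
        struct interval a = { .sector = 0, .size = 4096 };   /* sectors 0..7  */
        struct interval b = { .sector = 8, .size = 4096 };   /* sectors 8..15 */

        printf("%d\n", overlaps(&a, &b));   /* 0: adjacent, not overlapping */
        return 0;
    }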
D | drbd_actlog.c |
    140  sector_t sector, int rw)  in _drbd_md_sync_page_io() argument
    156  bio->bi_iter.bi_sector = sector;  in _drbd_md_sync_page_io()
    191  sector_t sector, int rw)  in drbd_md_sync_page_io() argument
    200  (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",  in drbd_md_sync_page_io()
    203  if (sector < drbd_md_first_sector(bdev) ||  in drbd_md_sync_page_io()
    204  sector + 7 > drbd_md_last_sector(bdev))  in drbd_md_sync_page_io()
    207  (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");  in drbd_md_sync_page_io()
    209  err = _drbd_md_sync_page_io(device, bdev, sector, rw);  in drbd_md_sync_page_io()
    212  (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);  in drbd_md_sync_page_io()
    256  unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);  in drbd_al_begin_io_fastpath()
    [all …]
|
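drbd_md_sync_page_io() above rejects metadata I/O whose 4KiB page would leave the metadata area; sector + 7 is the last of the eight 512-byte sectors that page occupies. A standalone sketch of the bounds check, with made-up limits:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define MD_PAGE_SECTORS 8    /* one 4KiB metadata page = 8 x 512-byte sectors */

    /* true iff the whole page [sector, sector + 7] lies inside the metadata area */
    static bool md_io_in_range(sector_t sector, sector_t md_first, sector_t md_last)
    {
        return sector >= md_first && sector + (MD_PAGE_SECTORS - 1) <= md_last;
    }

    int main(void)
    {
        /* metadata area assumed to cover sectors 1000..1999 for the example */
        printf("%d %d\n", md_io_in_range(1000, 1000, 1999),
                          md_io_in_range(1993, 1000, 1999));   /* 1 0 */
        return 0;
    }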
D | drbd_worker.c |
    158  drbd_rs_complete_io(device, i.sector);  in drbd_endio_write_sec_final()
    184  (unsigned long long)peer_req->i.sector);  in drbd_peer_request_endio()
    338  sector_t sector = peer_req->i.sector;  in w_e_send_csum() local
    349  err = drbd_send_drequest_csum(peer_device, sector, size,  in w_e_send_csum()
    369  static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)  in read_for_csum() argument
    379  peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,  in read_for_csum()
    569  sector_t sector;  in make_resync_request() local
    628  sector = BM_BIT_TO_SECT(bit);  in make_resync_request()
    630  if (drbd_try_rs_begin_io(device, sector)) {  in make_resync_request()
    637  drbd_rs_complete_io(device, sector);  in make_resync_request()
    [all …]
|
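In make_resync_request(), BM_BIT_TO_SECT() turns a dirty-bitmap bit number into the first sector that bit covers. A sketch assuming DRBD's usual 4KiB-per-bit granularity (BM_BLOCK_SHIFT of 12), so each bit spans eight sectors:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Assumed granularity: 4KiB of disk per bitmap bit, i.e. 1 << (12 - 9) sectors. */
    #define BM_BLOCK_SHIFT      12
    #define BM_BIT_TO_SECT(bit) ((sector_t)(bit) << (BM_BLOCK_SHIFT - 9))

    int main(void)
    {
        printf("bit 10 -> sectors %llu..%llu\n",
               (unsigned long long)BM_BIT_TO_SECT(10),
               (unsigned long long)(BM_BIT_TO_SECT(11) - 1));   /* 80..87 */
        return 0;
    }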
D | drbd_receiver.c |
    341   drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,  in drbd_alloc_peer_req() argument
    370   peer_req->i.sector = sector;  in drbd_alloc_peer_req()
    1374  sector_t sector = peer_req->i.sector;  in drbd_submit_peer_request() local
    1392  sector, data_size >> 9, GFP_NOIO, false))  in drbd_submit_peer_request()
    1419  bio->bi_iter.bi_sector = sector;  in drbd_submit_peer_request()
    1451  sector += len >> 9;  in drbd_submit_peer_request()
    1588  read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,  in read_in_block() argument
    1628  if (sector + (data_size>>9) > capacity) {  in read_in_block()
    1632  (unsigned long long)sector, data_size);  in read_in_block()
    1639  peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO);  in read_in_block()
    [all …]
|
D | drbd_interval.h |
    9   sector_t sector; /* start sector of the interval */  member
    37  #define drbd_for_each_overlap(i, root, sector, size) \  argument
    38  for (i = drbd_find_overlap(root, sector, size); \
    40  i = drbd_next_overlap(i, sector, size))
|
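drbd_for_each_overlap() iterates every interval in the tree that overlaps the half-open sector range [sector, sector + (size >> 9)). A self-contained sketch of the same iteration contract over a plain array instead of the rbtree (find_overlap/for_each_overlap here are stand-ins, not the DRBD functions):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct ival { sector_t sector; unsigned int size; };   /* size in bytes */

    /* Linear stand-ins for drbd_find_overlap()/drbd_next_overlap(): the real
     * code walks an augmented rbtree, but the iteration contract is the same. */
    static struct ival *find_overlap(struct ival *v, int n, int *pos,
                                     sector_t sector, unsigned int size)
    {
        sector_t end = sector + (size >> 9);

        for (; *pos < n; (*pos)++)
            if (v[*pos].sector < end &&
                sector < v[*pos].sector + (v[*pos].size >> 9))
                return &v[(*pos)++];
        return NULL;
    }

    #define for_each_overlap(i, v, n, pos, sector, size)            \
        for (pos = 0, i = find_overlap(v, n, &pos, sector, size);   \
             i;                                                     \
             i = find_overlap(v, n, &pos, sector, size))

    int main(void)
    {
        struct ival v[] = { { 0, 4096 }, { 8, 4096 }, { 32, 4096 } };
        struct ival *i;
        int pos;

        for_each_overlap(i, v, 3, pos, 4, 8192)        /* sectors 4..19 */
            printf("overlap starting at sector %llu\n",
                   (unsigned long long)i->sector);     /* prints 0 and 8 */
        return 0;
    }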
D | drbd_req.c |
    34   static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
    67   req->i.sector = bio_src->bi_iter.bi_sector;  in drbd_req_new()
    134  s, (unsigned long long)req->i.sector, req->i.size);  in drbd_req_destroy()
    153  drbd_set_out_of_sync(device, req->i.sector, req->i.size);  in drbd_req_destroy()
    156  drbd_set_in_sync(device, req->i.sector, req->i.size);  in drbd_req_destroy()
    176  (unsigned long long) req->i.sector, req->i.size);  in drbd_req_destroy()
    548  (unsigned long long)req->i.sector,  in drbd_report_io_error()
    642  drbd_set_out_of_sync(device, req->i.sector, req->i.size);  in __req_mod()
    912  static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)  in drbd_may_do_local_read() argument
    921  esector = sector + (size >> 9) - 1;  in drbd_may_do_local_read()
    [all …]
|
/drivers/block/ |
D | brd.c |
    54   static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)  in brd_lookup_page() argument
    71   idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */  in brd_lookup_page()
    85   static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)  in brd_insert_page() argument
    91   page = brd_lookup_page(brd, sector);  in brd_insert_page()
    118  idx = sector >> PAGE_SECTORS_SHIFT;  in brd_insert_page()
    133  static void brd_free_page(struct brd_device *brd, sector_t sector)  in brd_free_page() argument
    139  idx = sector >> PAGE_SECTORS_SHIFT;  in brd_free_page()
    146  static void brd_zero_page(struct brd_device *brd, sector_t sector)  in brd_zero_page() argument
    150  page = brd_lookup_page(brd, sector);  in brd_zero_page()
    195  static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)  in copy_to_brd_setup() argument
    [all …]
|
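brd stores the ramdisk as one page per group of sectors, so a sector is located by shifting away PAGE_SECTORS_SHIFT bits for the page index and masking for the byte offset inside the page. A sketch assuming 4KiB pages (eight sectors per page):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define SECTOR_SHIFT        9
    #define PAGE_SECTORS_SHIFT  3                        /* 4KiB page / 512-byte sector */
    #define PAGE_SECTORS        (1 << PAGE_SECTORS_SHIFT)

    int main(void)
    {
        sector_t sector = 1234;

        uint64_t page_idx = sector >> PAGE_SECTORS_SHIFT;                    /* which backing page */
        unsigned offset   = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;   /* byte offset in it  */

        printf("page %llu, offset %u\n", (unsigned long long)page_idx, offset);
        return 0;                                        /* page 154, offset 1024 */
    }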
/drivers/scsi/ |
D | sr_vendor.c |
    161  unsigned long sector;  in sr_cd_check() local
    173  sector = 0; /* the multisession sector offset goes here */  in sr_cd_check()
    199  sector = buffer[11] + (buffer[10] << 8) +  in sr_cd_check()
    203  sector = 0;  in sr_cd_check()
    231  sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;  in sr_cd_check()
    259  sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;  in sr_cd_check()
    260  if (sector)  in sr_cd_check()
    261  sector -= CD_MSF_OFFSET;  in sr_cd_check()
    297  sector = buffer[11] + (buffer[10] << 8) +  in sr_cd_check()
    307  sector = 0;  in sr_cd_check()
    [all …]
|
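The sr_cd_check() lines convert a CD table-of-contents MSF address (minute/second/frame, 75 frames per second) into a logical sector and subtract the 150-frame (two second) MSF offset. The same arithmetic in isolation:

    #include <stdio.h>

    #define CD_SECS        60    /* seconds per minute          */
    #define CD_FRAMES      75    /* frames (sectors) per second */
    #define CD_MSF_OFFSET  150   /* MSF numbering starts 2 seconds in */

    /* MSF address -> logical sector, as sr_cd_check() does with TOC data */
    static unsigned long msf_to_sector(int min, int sec, int frame)
    {
        unsigned long sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;

        if (sector)
            sector -= CD_MSF_OFFSET;
        return sector;
    }

    int main(void)
    {
        printf("%lu\n", msf_to_sector(0, 2, 0));   /* 0: start of the data area */
        return 0;
    }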
/drivers/md/ |
D | raid0.c |
    289  sector_t sector = *sectorp;  in find_zone() local
    292  if (sector < z[i].zone_end) {  in find_zone()
    294  *sectorp = sector - z[i-1].zone_end;  in find_zone()
    305  sector_t sector, sector_t *sector_offset)  in map_sector() argument
    316  sect_in_chunk = sector & (chunk_sects - 1);  in map_sector()
    317  sector >>= chunksect_bits;  in map_sector()
    323  sect_in_chunk = sector_div(sector, chunk_sects);  in map_sector()
    334  + sector_div(sector, zone->nb_dev)];  in map_sector()
    449  sector_t sector = bio->bi_iter.bi_sector;  in is_io_in_chunk_boundary() local
    450  return chunk_sects >= (sector_div(sector, chunk_sects)  in is_io_in_chunk_boundary()
    [all …]
|
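map_sector() splits a striped array's linear sector into a chunk number and an offset within the chunk: a mask and shift when the chunk size is a power of two, a division (sector_div() in the kernel) otherwise. A user-space sketch of that split:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    static void split_chunk(sector_t sector, unsigned int chunk_sects,
                            sector_t *chunk, unsigned int *sect_in_chunk)
    {
        if ((chunk_sects & (chunk_sects - 1)) == 0) {             /* power of two */
            *sect_in_chunk = sector & (chunk_sects - 1);
            *chunk = sector >> __builtin_ctz(chunk_sects);        /* chunksect_bits */
        } else {
            *sect_in_chunk = sector % chunk_sects;
            *chunk = sector / chunk_sects;                        /* sector_div() in the kernel */
        }
    }

    int main(void)
    {
        sector_t chunk;
        unsigned int in_chunk;

        split_chunk(1000, 128, &chunk, &in_chunk);   /* 128-sector (64KiB) chunks */
        printf("chunk %llu, offset %u\n", (unsigned long long)chunk, in_chunk);
        return 0;                                    /* chunk 7, offset 104 */
    }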
D | dm-log-writes.c |
    92   __le64 sector;  member
    119  sector_t sector;  member
    191  sector_t sector)  in write_metadata() argument
    204  bio->bi_iter.bi_sector = sector;  in write_metadata()
    240  struct pending_block *block, sector_t sector)  in log_one_block() argument
    247  entry.sector = cpu_to_le64(block->sector);  in log_one_block()
    252  block->datalen, sector)) {  in log_one_block()
    259  sector++;  in log_one_block()
    268  bio->bi_iter.bi_sector = sector;  in log_one_block()
    289  bio->bi_iter.bi_sector = sector;  in log_one_block()
    [all …]
|
D | raid1.c |
    296   r1_bio->sector + (r1_bio->sectors);  in update_head_pos()
    361   (unsigned long long)r1_bio->sector);  in raid1_end_read_request()
    380   bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,  in close_write()
    454   r1_bio->sector, r1_bio->sectors,  in raid1_end_write_request()
    515   const sector_t this_sector = r1_bio->sector;  in read_balance()
    860   sector_t sector = 0;  in wait_barrier() local
    898   sector = conf->start_next_window;  in wait_barrier()
    904   return sector;  in wait_barrier()
    1123  r1_bio->sector = bio->bi_iter.bi_sector;  in make_request()
    1164  bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,  in make_request()
    [all …]
|
D | raid5.c |
    137  static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)  in r5_next_bio() argument
    140  if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)  in r5_next_bio()
    446  (unsigned long long)sh->sector);  in remove_hash()
    453  struct hlist_head *hp = stripe_hash(conf, sh->sector);  in insert_hash()
    456  (unsigned long long)sh->sector);  in insert_hash()
    518  static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)  in init_stripe() argument
    529  (unsigned long long)sector);  in init_stripe()
    534  sh->sector = sector;  in init_stripe()
    535  stripe_set_idx(sector, conf, previous, sh);  in init_stripe()
    544  (unsigned long long)sh->sector, i, dev->toread,  in init_stripe()
    [all …]
|
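raid5 caches and hashes stripe_heads by sh->sector, and r5_next_bio() checks whether a bio extends past the current stripe unit (STRIPE_SECTORS, i.e. 4KiB). A sketch of walking a request in stripe-unit steps, the granularity those functions key on:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define STRIPE_SECTORS 8   /* raid5 works on 4KiB stripe units = 8 sectors */

    int main(void)
    {
        sector_t start = 20, len = 30;      /* request: sectors 20..49 */
        sector_t end = start + len;

        /* round down to the containing stripe unit, then step unit by unit */
        for (sector_t s = start & ~(sector_t)(STRIPE_SECTORS - 1); s < end;
             s += STRIPE_SECTORS)
            printf("stripe unit at sector %llu\n", (unsigned long long)s);
        return 0;                           /* 16, 24, 32, 40, 48 */
    }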
D | dm-crypt.c |
    62    sector_t sector;  member
    682   u64 sector = cpu_to_le64((u64)dmreq->iv_sector);  in crypt_iv_tcw_whitening() local
    689   crypto_xor(buf, (u8 *)&sector, 8);  in crypt_iv_tcw_whitening()
    690   crypto_xor(&buf[8], (u8 *)&sector, 8);  in crypt_iv_tcw_whitening()
    721   u64 sector = cpu_to_le64((u64)dmreq->iv_sector);  in crypt_iv_tcw_gen() local
    734   crypto_xor(iv, (u8 *)&sector, 8);  in crypt_iv_tcw_gen()
    736   crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);  in crypt_iv_tcw_gen()
    805   sector_t sector)  in crypt_convert_init() argument
    813   ctx->cc_sector = sector + cc->iv_offset;  in crypt_convert_init()
    1049  struct bio *bio, sector_t sector)  in crypt_io_init() argument
    [all …]
|
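The dm-crypt tcw lines mix the little-endian sector number into the IV with crypto_xor() so every sector encrypts differently. A minimal sketch of the simpler idea underneath, a plain64-style IV built directly from the sector number (illustrative, not the tcw algorithm itself):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* IV = zero-padded 64-bit sector number; the kernel uses cpu_to_le64(),
     * here a little-endian host is assumed for brevity. */
    static void sector_iv(uint8_t *iv, unsigned iv_size, uint64_t sector)
    {
        uint64_t le = sector;

        memset(iv, 0, iv_size);
        memcpy(iv, &le, sizeof(le) < iv_size ? sizeof(le) : iv_size);
    }

    int main(void)
    {
        uint8_t iv[16];

        sector_iv(iv, sizeof(iv), 0x1122334455667788ULL);
        for (unsigned i = 0; i < sizeof(iv); i++)
            printf("%02x", iv[i]);
        printf("\n");
        return 0;
    }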
D | raid10.c |
    411  (unsigned long long)r10_bio->sector);  in raid10_end_read_request()
    420  bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,  in close_write()
    555  sector_t sector;  in __raid10_find_phys() local
    569  chunk = r10bio->sector >> geo->chunk_shift;  in __raid10_find_phys()
    570  sector = r10bio->sector & geo->chunk_mask;  in __raid10_find_phys()
    578  sector += stripe << geo->chunk_shift;  in __raid10_find_phys()
    584  sector_t s = sector;  in __raid10_find_phys()
    610  sector += (geo->chunk_mask + 1);  in __raid10_find_phys()
    620  ((r10bio->sector >= conf->reshape_progress) !=  in raid10_find_phys()
    630  static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)  in raid10_find_virt() argument
    [all …]
|
D | dm-stripe.c |
    213  static void stripe_map_sector(struct stripe_c *sc, sector_t sector,  in stripe_map_sector() argument
    216  sector_t chunk = dm_target_offset(sc->ti, sector);  in stripe_map_sector()
    241  static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,  in stripe_map_range_sector() argument
    246  stripe_map_sector(sc, sector, &stripe, result);  in stripe_map_range_sector()
    251  sector = *result;  in stripe_map_range_sector()
    253  *result -= sector_div(sector, sc->chunk_size);  in stripe_map_range_sector()
    255  *result = sector & ~(sector_t)(sc->chunk_size - 1);  in stripe_map_range_sector()
|
/drivers/usb/storage/ |
D | jumpshot.c |
    166  u32 sector,  in jumpshot_read_data() argument
    182  if (sector > 0x0FFFFFFF)  in jumpshot_read_data()
    204  command[2] = sector & 0xFF;  in jumpshot_read_data()
    205  command[3] = (sector >> 8) & 0xFF;  in jumpshot_read_data()
    206  command[4] = (sector >> 16) & 0xFF;  in jumpshot_read_data()
    208  command[5] = 0xE0 | ((sector >> 24) & 0x0F);  in jumpshot_read_data()
    228  sector += thistime;  in jumpshot_read_data()
    243  u32 sector,  in jumpshot_write_data() argument
    259  if (sector > 0x0FFFFFFF)  in jumpshot_write_data()
    286  command[2] = sector & 0xFF;  in jumpshot_write_data()
    [all …]
|
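jumpshot_read_data()/jumpshot_write_data() (and datafab.c and shuttle_usbat.c below) program a 28-bit LBA into ATA taskfile bytes: the low three bytes into command[2..4] and the top nibble OR'd with 0xE0 (LBA mode, master device) into command[5], which is why sectors above 0x0FFFFFFF are rejected. The packing on its own:

    #include <stdint.h>
    #include <stdio.h>

    static void pack_lba28(uint8_t cmd[8], uint32_t sector)
    {
        cmd[2] = sector & 0xFF;                    /* LBA  7:0                   */
        cmd[3] = (sector >> 8) & 0xFF;             /* LBA 15:8                   */
        cmd[4] = (sector >> 16) & 0xFF;            /* LBA 23:16                  */
        cmd[5] = 0xE0 | ((sector >> 24) & 0x0F);   /* LBA 27:24, LBA bit, master */
        /* the transfer count and opcode live in other command bytes */
    }

    int main(void)
    {
        uint8_t cmd[8] = { 0 };

        pack_lba28(cmd, 0x0ABCDEF1);
        printf("%02x %02x %02x %02x\n", cmd[2], cmd[3], cmd[4], cmd[5]);
        return 0;                                  /* f1 de bc ea */
    }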
D | datafab.c |
    148  u32 sector,  in datafab_read_data() argument
    193  command[2] = sector & 0xFF;  in datafab_read_data()
    194  command[3] = (sector >> 8) & 0xFF;  in datafab_read_data()
    195  command[4] = (sector >> 16) & 0xFF;  in datafab_read_data()
    198  command[5] |= (sector >> 24) & 0x0F;  in datafab_read_data()
    216  sector += thistime;  in datafab_read_data()
    231  u32 sector,  in datafab_write_data() argument
    281  command[2] = sector & 0xFF;  in datafab_write_data()
    282  command[3] = (sector >> 8) & 0xFF;  in datafab_write_data()
    283  command[4] = (sector >> 16) & 0xFF;  in datafab_write_data()
    [all …]
|
D | shuttle_usbat.c |
    211   u32 sector, unsigned char cmd)  in usbat_pack_ata_sector_cmd() argument
    215   buf[2] = sector & 0xFF;  in usbat_pack_ata_sector_cmd()
    216   buf[3] = (sector >> 8) & 0xFF;  in usbat_pack_ata_sector_cmd()
    217   buf[4] = (sector >> 16) & 0xFF;  in usbat_pack_ata_sector_cmd()
    218   buf[5] = 0xE0 | ((sector >> 24) & 0x0F);  in usbat_pack_ata_sector_cmd()
    1119  u32 sector,  in usbat_flash_read_data() argument
    1150  if (sector > 0x0FFFFFFF)  in usbat_flash_read_data()
    1175  usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20);  in usbat_flash_read_data()
    1193  sector += thistime;  in usbat_flash_read_data()
    1210  u32 sector,  in usbat_flash_write_data() argument
    [all …]
|
/drivers/mtd/nand/ |
D | sh_flctl.c |
    446  (struct sh_flctl *flctl, uint8_t *buff, int sector)  in read_ecfiforeg() argument
    452  res = wait_recfifo_ready(flctl , sector);  in read_ecfiforeg()
    583  int sector, page_sectors;  in execmd_read_page_sector() local
    599  for (sector = 0; sector < page_sectors; sector++) {  in execmd_read_page_sector()
    600  read_fiforeg(flctl, 512, 512 * sector);  in execmd_read_page_sector()
    603  &flctl->done_buff[mtd->writesize + 16 * sector],  in execmd_read_page_sector()
    604  sector);  in execmd_read_page_sector()
    654  int sector, page_sectors;  in execmd_write_page_sector() local
    667  for (sector = 0; sector < page_sectors; sector++) {  in execmd_write_page_sector()
    668  write_fiforeg(flctl, 512, 512 * sector);  in execmd_write_page_sector()
    [all …]
|
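execmd_read_page_sector() works through a NAND page 512 bytes at a time, draining the data FIFO at offset 512 * sector and, per the listing, placing 16 bytes of ECC per step after mtd->writesize. A sketch of just the offsets, assuming a 2KiB page:

    #include <stdio.h>

    int main(void)
    {
        const int writesize    = 2048;            /* assumed 2KiB NAND page      */
        const int page_sectors = writesize / 512; /* 512-byte ECC steps per page */

        for (int sector = 0; sector < page_sectors; sector++)
            printf("data @ %4d, ecc @ %4d\n",
                   512 * sector, writesize + 16 * sector);
        return 0;
    }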
D | atmel_nand_ecc.h |
    120  #define pmecc_readb_ecc_relaxed(addr, sector, n) \  argument
    121  readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
    123  #define pmecc_readl_rem_relaxed(addr, sector, n) \  argument
    124  readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
|
/drivers/char/ |
D | ps3flash.c |
    110  u64 size, sector, offset;  in ps3flash_read() local
    130  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
    140  res = ps3flash_fetch(dev, sector);  in ps3flash_read()
    163  sector += priv->chunk_sectors;  in ps3flash_read()
    179  u64 size, sector, offset;  in ps3flash_write() local
    199  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
    210  res = ps3flash_fetch(dev, sector);  in ps3flash_write()
    211  else if (sector != priv->tag)  in ps3flash_write()
    231  priv->tag = sector;  in ps3flash_write()
    238  sector += priv->chunk_sectors;  in ps3flash_write()
|
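ps3flash reads and writes through a bounce buffer, so a byte position in the file is converted to the first sector of the chunk that holds it plus a byte offset inside the chunk. A sketch with an assumed chunk size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t bounce_size   = 256 * 1024;        /* assumed 256KiB bounce buffer */
        const uint64_t chunk_sectors = bounce_size / 512; /* sectors per chunk            */
        uint64_t pos = 1000000;                           /* byte offset into the device  */

        uint64_t sector = pos / bounce_size * chunk_sectors; /* chunk start, in sectors   */
        uint64_t offset = pos % bounce_size;                 /* byte offset in that chunk */

        printf("sector %llu, offset %llu\n",
               (unsigned long long)sector, (unsigned long long)offset);
        return 0;
    }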
/drivers/mtd/devices/ |
D | docg3.c |
    410  static void doc_setup_addr_sector(struct docg3 *docg3, int sector)  in doc_setup_addr_sector() argument
    413  doc_flash_address(docg3, sector & 0xff);  in doc_setup_addr_sector()
    414  doc_flash_address(docg3, (sector >> 8) & 0xff);  in doc_setup_addr_sector()
    415  doc_flash_address(docg3, (sector >> 16) & 0xff);  in doc_setup_addr_sector()
    425  static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)  in doc_setup_writeaddr_sector() argument
    430  doc_flash_address(docg3, sector & 0xff);  in doc_setup_writeaddr_sector()
    431  doc_flash_address(docg3, (sector >> 8) & 0xff);  in doc_setup_writeaddr_sector()
    432  doc_flash_address(docg3, (sector >> 16) & 0xff);  in doc_setup_writeaddr_sector()
    451  int sector, ret = 0;  in doc_read_seek() local
    473  sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);  in doc_read_seek()
    [all …]
|
/drivers/mtd/ |
D | rfd_ftl.c |
    90   static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
    241  static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)  in rfd_ftl_readsect() argument
    248  if (sector >= part->sector_count)  in rfd_ftl_readsect()
    251  addr = part->sector_map[sector];  in rfd_ftl_readsect()
    639  static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)  in do_writesect() argument
    680  part->sector_map[sector] = addr;  in do_writesect()
    682  entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);  in do_writesect()
    705  static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)  in rfd_ftl_writesect() argument
    712  pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);  in rfd_ftl_writesect()
    719  if (sector >= part->sector_count) {  in rfd_ftl_writesect()
    [all …]
|
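rfd_ftl translates logical sectors through an in-memory sector_map[] (rfd_ftl_readsect() simply indexes it), while do_writesect() records the owning sector on flash, storing sector 0 as the reserved SECTOR_ZERO value so it cannot be mistaken for an erased 0xffff entry. A toy version of the in-memory lookup (values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define UNMAPPED ((uint32_t)-1)

    /* logical sector -> flash byte address, or -1 when out of range / never written */
    static long map_sector(const uint32_t *sector_map, unsigned count, unsigned sector)
    {
        if (sector >= count)
            return -1;                      /* past the device       */
        if (sector_map[sector] == UNMAPPED)
            return -1;                      /* reads back as zeroes  */
        return sector_map[sector];
    }

    int main(void)
    {
        uint32_t map[4] = { 0x2000, UNMAPPED, 0x4200, UNMAPPED };

        printf("%ld %ld\n", map_sector(map, 4, 0), map_sector(map, 4, 1));
        return 0;                           /* 8192 -1 */
    }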
D | ftl.c |
    788  u_long sector, u_long nblocks)  in ftl_read() argument
    796  part, sector, nblocks);  in ftl_read()
    804  if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {  in ftl_read()
    808  log_addr = part->VirtualBlockMap[sector+i];  in ftl_read()
    902  u_long sector, u_long nblocks)  in ftl_write() argument
    910  part, sector, nblocks);  in ftl_write()
    924  virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;  in ftl_write()
    961  old_addr = part->VirtualBlockMap[sector+i];  in ftl_write()
    963  part->VirtualBlockMap[sector+i] = 0xffffffff;  in ftl_write()
    972  part->VirtualBlockMap[sector+i] = log_addr;  in ftl_write()
    [all …]
|
/drivers/target/ |
D | target_core_sbc.c |
    1252  sector_t sector = cmd->t_task_lba;  in sbc_dif_generate() local
    1296  sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);  in sbc_dif_generate()
    1302  "WRITE" : "READ", (unsigned long long)sector,  in sbc_dif_generate()
    1306  sector++;  in sbc_dif_generate()
    1316  __u16 crc, sector_t sector, unsigned int ei_lba)  in sbc_dif_v1_verify() argument
    1327  " csum 0x%04x\n", (unsigned long long)sector,  in sbc_dif_v1_verify()
    1337  be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {  in sbc_dif_v1_verify()
    1339  " sector MSB: 0x%08x\n", (unsigned long long)sector,  in sbc_dif_v1_verify()
    1340  be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));  in sbc_dif_v1_verify()
    1347  " ei_lba: 0x%08x\n", (unsigned long long)sector,  in sbc_dif_v1_verify()
    [all …]
|
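For T10 protection information type 1, the 4-byte reference tag stored with each block is the low 32 bits of its LBA: sbc_dif_generate() writes it with cpu_to_be32() and sbc_dif_v1_verify() fails the block if the tag does not match the expected sector. A host-order sketch of both sides:

    #include <stdint.h>
    #include <stdio.h>

    /* expected ref tag for a given LBA: low 32 bits of the sector number */
    static uint32_t dif_ref_tag(uint64_t sector)
    {
        return (uint32_t)(sector & 0xffffffff);
    }

    static int dif_v1_check_ref(uint64_t sector, uint32_t ref_tag)
    {
        if (ref_tag != dif_ref_tag(sector))
            return -1;                  /* ILLEGAL REQUEST in the real code */
        return 0;
    }

    int main(void)
    {
        uint64_t lba = 0x100000010ULL;  /* only the low 32 bits count toward the tag */

        printf("%d\n", dif_v1_check_ref(lba, 0x00000010));   /* 0: tag matches */
        return 0;
    }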