/drivers/block/drbd/ |
D | drbd_interval.c |
    25   sector_t max = node->sector + (node->size >> 9);  in compute_subtree_last()
    90   if (this->sector < here->sector)  in drbd_insert_interval()
    92   else if (this->sector > here->sector)  in drbd_insert_interval()
    118  drbd_contains_interval(struct rb_root *root, sector_t sector,  in drbd_contains_interval() argument
    127  if (sector < here->sector)  in drbd_contains_interval()
    129  else if (sector > here->sector)  in drbd_contains_interval()
    162  drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)  in drbd_find_overlap() argument
    166  sector_t end = sector + (size >> 9);  in drbd_find_overlap()
    175  sector < interval_end(node->rb_left)) {  in drbd_find_overlap()
    178  } else if (here->sector < end &&  in drbd_find_overlap()
    [all …]
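The drbd_interval matches above show how drbd keys its interval tree: an interval starts at `sector` and spans `size` bytes, so its exclusive end is `sector + (size >> 9)` (bytes converted to 512-byte sectors). A minimal user-space sketch of that overlap test; the struct and field names echo `drbd_interval` but everything here is illustrative, not the kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct interval {
        sector_t sector;     /* start, in 512-byte sectors */
        unsigned int size;   /* length in bytes */
    };

    /* Exclusive end sector: byte length converted to sectors with >> 9. */
    static sector_t interval_end(const struct interval *i)
    {
        return i->sector + (i->size >> 9);
    }

    /* Two half-open sector ranges overlap iff each starts before the other ends. */
    static bool intervals_overlap(const struct interval *a, const struct interval *b)
    {
        return a->sector < interval_end(b) && b->sector < interval_end(a);
    }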
|
D | drbd_actlog.c |
    147  struct page *page, sector_t sector,  in _drbd_md_sync_page_io() argument
    162  bio->bi_sector = sector;  in _drbd_md_sync_page_io()
    196  sector_t sector, int rw)  in drbd_md_sync_page_io() argument
    207  (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",  in drbd_md_sync_page_io()
    210  if (sector < drbd_md_first_sector(bdev) ||  in drbd_md_sync_page_io()
    211  sector + 7 > drbd_md_last_sector(bdev))  in drbd_md_sync_page_io()
    214  (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");  in drbd_md_sync_page_io()
    217  err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);  in drbd_md_sync_page_io()
    220  (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);  in drbd_md_sync_page_io()
    264  unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);  in drbd_al_begin_io_fastpath()
    [all …]
|
D | drbd_worker.c |
    155  drbd_rs_complete_io(mdev, i.sector);  in drbd_endio_write_sec_final()
    180  (unsigned long long)peer_req->i.sector);  in drbd_peer_request_endio()
    185  (unsigned long long)peer_req->i.sector);  in drbd_peer_request_endio()
    350  sector_t sector = peer_req->i.sector;  in w_e_send_csum() local
    361  err = drbd_send_drequest_csum(mdev, sector, size,  in w_e_send_csum()
    381  static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)  in read_for_csum() argument
    388  if (drbd_rs_should_slow_down(mdev, sector))  in read_for_csum()
    393  peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,  in read_for_csum()
    567  sector_t sector;  in w_make_resync_request() local
    621  sector = BM_BIT_TO_SECT(bit);  in w_make_resync_request()
    [all …]
|
D | drbd_receiver.c |
    333   drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,  in drbd_alloc_peer_req() argument
    358   peer_req->i.sector = sector;  in drbd_alloc_peer_req()
    1315  sector_t sector = peer_req->i.sector;  in drbd_submit_peer_request() local
    1336  bio->bi_sector = sector;  in drbd_submit_peer_request()
    1363  sector += len >> 9;  in drbd_submit_peer_request()
    1487  read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,  in read_in_block() argument
    1518  if (sector + (data_size>>9) > capacity) {  in read_in_block()
    1522  (unsigned long long)sector, data_size);  in read_in_block()
    1529  peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);  in read_in_block()
    1558  (unsigned long long)sector, data_size);  in read_in_block()
    [all …]
|
D | drbd_interval.h |
    9   sector_t sector; /* start sector of the interval */  member
    35  #define drbd_for_each_overlap(i, root, sector, size) \  argument
    36  for (i = drbd_find_overlap(root, sector, size); \
    38  i = drbd_next_overlap(i, sector, size))
|
D | drbd_req.c |
    34   static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
    80   req->i.sector = bio_src->bi_sector;  in drbd_req_new()
    135  drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);  in drbd_req_destroy()
    138  drbd_set_in_sync(mdev, req->i.sector, req->i.size);  in drbd_req_destroy()
    158  (unsigned long long) req->i.sector, req->i.size);  in drbd_req_destroy()
    436  (unsigned long long)req->i.sector,  in drbd_report_io_error()
    515  drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);  in __req_mod()
    774  static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)  in drbd_may_do_local_read() argument
    783  esector = sector + (size >> 9) - 1;  in drbd_may_do_local_read()
    785  D_ASSERT(sector < nr_sectors);  in drbd_may_do_local_read()
    [all …]
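drbd_may_do_local_read() computes the last sector a request touches as `sector + (size >> 9) - 1` and asserts it stays inside the backing device. A stand-alone sketch of that bounds check; the `nr_sectors` parameter stands in for the kernel's capacity lookup and is an assumption of this example:

    #include <stdbool.h>
    #include <stdint.h>

    /* Does a request of 'size' bytes starting at 'sector' fit inside a
     * device of 'nr_sectors' 512-byte sectors? Mirrors the esector check
     * in drbd_may_do_local_read(), rewritten as plain user-space code. */
    static bool request_in_bounds(uint64_t sector, unsigned int size, uint64_t nr_sectors)
    {
        uint64_t esector;

        if (size == 0 || (size & 511))      /* expect whole 512-byte sectors */
            return false;
        esector = sector + (size >> 9) - 1; /* last sector touched */
        return sector < nr_sectors && esector < nr_sectors;
    }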
|
/drivers/block/ |
D | brd.c |
    55   static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)  in brd_lookup_page() argument
    72   idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */  in brd_lookup_page()
    86   static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)  in brd_insert_page() argument
    92   page = brd_lookup_page(brd, sector);  in brd_insert_page()
    119  idx = sector >> PAGE_SECTORS_SHIFT;  in brd_insert_page()
    134  static void brd_free_page(struct brd_device *brd, sector_t sector)  in brd_free_page() argument
    140  idx = sector >> PAGE_SECTORS_SHIFT;  in brd_free_page()
    147  static void brd_zero_page(struct brd_device *brd, sector_t sector)  in brd_zero_page() argument
    151  page = brd_lookup_page(brd, sector);  in brd_zero_page()
    196  static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)  in copy_to_brd_setup() argument
    [all …]
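brd stores one page per PAGE_SIZE worth of RAM-disk data, so every lookup first turns the 512-byte sector number into a page index with `sector >> PAGE_SECTORS_SHIFT`. With 4 KiB pages that shift is 3, and the byte offset inside the page is the remaining low bits shifted back to bytes. A small sketch of the arithmetic (the 4 KiB page size is an assumption for the example, not taken from brd.c):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT        9                                   /* 512-byte sectors */
    #define PAGE_SHIFT_ASSUMED  12                                  /* assumed 4 KiB pages */
    #define PAGE_SECTORS_SHIFT  (PAGE_SHIFT_ASSUMED - SECTOR_SHIFT)
    #define PAGE_SECTORS        (1u << PAGE_SECTORS_SHIFT)

    int main(void)
    {
        uint64_t sector = 1234567;
        uint64_t idx    = sector >> PAGE_SECTORS_SHIFT;                   /* which backing page */
        unsigned offset = (unsigned)(sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;  /* byte offset in page */

        printf("sector %llu -> page %llu, offset %u\n",
               (unsigned long long)sector, (unsigned long long)idx, offset);
        return 0;
    }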
|
/drivers/scsi/ |
D | sr_vendor.c |
    160  unsigned long sector;  in sr_cd_check() local
    172  sector = 0; /* the multisession sector offset goes here */  in sr_cd_check()
    198  sector = buffer[11] + (buffer[10] << 8) +  in sr_cd_check()
    202  sector = 0;  in sr_cd_check()
    230  sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;  in sr_cd_check()
    259  sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;  in sr_cd_check()
    260  if (sector)  in sr_cd_check()
    261  sector -= CD_MSF_OFFSET;  in sr_cd_check()
    297  sector = buffer[11] + (buffer[10] << 8) +  in sr_cd_check()
    307  sector = 0;  in sr_cd_check()
    [all …]
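Lines 230 and 259-261 are the classic CD MSF-to-LBA conversion: 75 frames per second, 60 seconds per minute, minus the 150-frame (2-second) lead-in offset. A stand-alone sketch using the same constant values the CD layer defines; the helper name is made up:

    #include <stdio.h>

    #define CD_SECS        60   /* seconds per minute */
    #define CD_FRAMES      75   /* frames (sectors) per second */
    #define CD_MSF_OFFSET 150   /* 2-second lead-in, in frames */

    /* Convert a Minute/Second/Frame address to a logical block address. */
    static long msf_to_lba(int min, int sec, int frame)
    {
        long sector = (long)min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;

        if (sector)
            sector -= CD_MSF_OFFSET;
        return sector;
    }

    int main(void)
    {
        /* 00:02:00 is LBA 0 by convention. */
        printf("%ld\n", msf_to_lba(0, 2, 0));
        return 0;
    }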
|
D | sd_dif.c |
    60   sector_t sector = bix->sector;  in sd_dif_type1_generate() local
    65   sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);  in sd_dif_type1_generate()
    69   sector++;  in sd_dif_type1_generate()
    87   sector_t sector = bix->sector;  in sd_dif_type1_verify() local
    96   if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {  in sd_dif_type1_verify()
    99   bix->disk_name, (unsigned long)sector,  in sd_dif_type1_verify()
    109  (unsigned long)sector,  in sd_dif_type1_verify()
    115  sector++;  in sd_dif_type1_verify()
    212  sector_t sector = bix->sector;  in sd_dif_type3_verify() local
    226  (unsigned long)sector,  in sd_dif_type3_verify()
    [all …]
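For T10 DIF Type 1, the 4-byte reference tag of each data block is the low 32 bits of its target sector, stored big-endian and incremented per block, which is exactly what the generate/verify pairs above do. A minimal generator/checker sketch; the 8-byte tuple layout follows the T10 protection-information format, the function names are made up, and the guard tag is left uncomputed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl/ntohl for the big-endian ref tag */

    /* 8-byte T10 protection-information tuple appended to each data block. */
    struct t10_pi_tuple {
        uint16_t guard_tag;  /* CRC of the data block (not computed in this sketch) */
        uint16_t app_tag;
        uint32_t ref_tag;    /* big-endian low 32 bits of the sector */
    };

    static void type1_generate(struct t10_pi_tuple *pi, unsigned nblocks, uint64_t sector)
    {
        for (unsigned i = 0; i < nblocks; i++, sector++)
            pi[i].ref_tag = htonl((uint32_t)(sector & 0xffffffff));
    }

    static bool type1_verify(const struct t10_pi_tuple *pi, unsigned nblocks, uint64_t sector)
    {
        for (unsigned i = 0; i < nblocks; i++, sector++)
            if (ntohl(pi[i].ref_tag) != (uint32_t)(sector & 0xffffffff))
                return false;   /* ref tag mismatch, as in sd_dif_type1_verify() */
        return true;
    }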
|
/drivers/md/ |
D | raid0.c |
    310  sector_t sector = *sectorp;  in find_zone() local
    313  if (sector < z[i].zone_end) {  in find_zone()
    315  *sectorp = sector - z[i-1].zone_end;  in find_zone()
    326  sector_t sector, sector_t *sector_offset)  in map_sector() argument
    337  sect_in_chunk = sector & (chunk_sects - 1);  in map_sector()
    338  sector >>= chunksect_bits;  in map_sector()
    344  sect_in_chunk = sector_div(sector, chunk_sects);  in map_sector()
    355  + sector_div(sector, zone->nb_dev)];  in map_sector()
    372  sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);  in raid0_mergeable_bvec() local
    373  sector_t sector_offset = sector;  in raid0_mergeable_bvec()
    [all …]
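map_sector() splits a zone-relative sector into (chunk number, offset within the chunk), using a mask and shift when the chunk size is a power of two and sector_div() otherwise, then picks the member device by taking the chunk number modulo the number of devices. A rough user-space sketch of the power-of-two path; the field layout and zone handling of md's real structs are omitted, so treat this as the textbook striping formula rather than raid0.c itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Map a zone-relative sector onto (device index, sector on that device)
     * for a striped layout with power-of-two chunks. */
    static void map_sector_pow2(uint64_t sector, unsigned chunk_sects,
                                unsigned nb_dev, unsigned *dev,
                                uint64_t *dev_sector)
    {
        unsigned chunksect_bits = __builtin_ctz(chunk_sects);   /* log2(chunk_sects) */
        uint64_t sect_in_chunk = sector & (chunk_sects - 1);    /* offset inside the chunk */
        uint64_t chunk = sector >> chunksect_bits;              /* chunk number in the zone */

        *dev = (unsigned)(chunk % nb_dev);                      /* round-robin across members */
        *dev_sector = (chunk / nb_dev) * chunk_sects + sect_in_chunk;
    }

    int main(void)
    {
        unsigned dev;
        uint64_t dev_sector;

        map_sector_pow2(1000, 128, 4, &dev, &dev_sector);       /* 64 KiB chunks, 4 disks */
        printf("dev %u, sector %llu\n", dev, (unsigned long long)dev_sector);
        return 0;
    }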
|
D | raid5.c |
    91   static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)  in r5_next_bio() argument
    94   if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)  in r5_next_bio()
    258  (unsigned long long)sh->sector);  in remove_hash()
    265  struct hlist_head *hp = stripe_hash(conf, sh->sector);  in insert_hash()
    268  (unsigned long long)sh->sector);  in insert_hash()
    326  static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)  in init_stripe() argument
    336  (unsigned long long)sh->sector);  in init_stripe()
    342  sh->sector = sector;  in init_stripe()
    343  stripe_set_idx(sector, conf, previous, sh);  in init_stripe()
    353  (unsigned long long)sh->sector, i, dev->toread,  in init_stripe()
    [all …]
|
D | raid1.c |
    275   r1_bio->sector + (r1_bio->sectors);  in update_head_pos()
    340   (unsigned long long)r1_bio->sector);  in raid1_end_read_request()
    359   bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,  in close_write()
    434   r1_bio->sector, r1_bio->sectors,  in raid1_end_write_request()
    497   const sector_t this_sector = r1_bio->sector;  in read_balance()
    698   sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);  in raid1_mergeable_bvec() local
    711   bvm->bi_sector = sector +  in raid1_mergeable_bvec()
    1060  r1_bio->sector = bio->bi_sector;  in make_request()
    1100  md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,  in make_request()
    1105  read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;  in make_request()
    [all …]
|
D | raid10.c |
    413  (unsigned long long)r10_bio->sector);  in raid10_end_read_request()
    422  bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,  in close_write()
    558  sector_t sector;  in __raid10_find_phys() local
    572  chunk = r10bio->sector >> geo->chunk_shift;  in __raid10_find_phys()
    573  sector = r10bio->sector & geo->chunk_mask;  in __raid10_find_phys()
    581  sector += stripe << geo->chunk_shift;  in __raid10_find_phys()
    587  sector_t s = sector;  in __raid10_find_phys()
    613  sector += (geo->chunk_mask + 1);  in __raid10_find_phys()
    623  ((r10bio->sector >= conf->reshape_progress) !=  in raid10_find_phys()
    633  static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)  in raid10_find_virt() argument
    [all …]
|
D | dm.c |
    927   static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)  in max_io_len_target_boundary() argument
    929   sector_t target_offset = dm_target_offset(ti, sector);  in max_io_len_target_boundary()
    934   static sector_t max_io_len(sector_t sector, struct dm_target *ti)  in max_io_len() argument
    936   sector_t len = max_io_len_target_boundary(sector, ti);  in max_io_len()
    943   offset = dm_target_offset(ti, sector);  in max_io_len()
    975   sector_t sector;  in __map_bio() local
    989   sector = clone->bi_sector;  in __map_bio()
    995   tio->io->bio->bi_bdev->bd_dev, sector);  in __map_bio()
    1014  sector_t sector;  member
    1019  static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)  in bio_setup_sector() argument
    [all …]
|
D | dm-stripe.c |
    208  static void stripe_map_sector(struct stripe_c *sc, sector_t sector,  in stripe_map_sector() argument
    211  sector_t chunk = dm_target_offset(sc->ti, sector);  in stripe_map_sector()
    236  static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,  in stripe_map_range_sector() argument
    241  stripe_map_sector(sc, sector, &stripe, result);  in stripe_map_range_sector()
    246  sector = *result;  in stripe_map_range_sector()
    248  *result -= sector_div(sector, sc->chunk_size);  in stripe_map_range_sector()
    250  *result = sector & ~(sector_t)(sc->chunk_size - 1);  in stripe_map_range_sector()
|
D | dm-crypt.c |
    63    sector_t sector;  member
    636   sector_t sector)  in crypt_convert_init() argument
    644   ctx->cc_sector = sector + cc->iv_offset;  in crypt_convert_init()
    876   struct bio *bio, sector_t sector)  in crypt_io_init() argument
    880   io->sector = sector;  in crypt_io_init()
    983   clone->bi_sector = cc->start + io->sector;  in kcryptd_io_read()
    1078  sector_t sector;  in kcryptd_crypt_write_io_submit() local
    1091  clone->bi_sector = cc->start + io->sector;  in kcryptd_crypt_write_io_submit()
    1096  sector = io->sector;  in kcryptd_crypt_write_io_submit()
    1099  if (sector < crypt_io_from_node(parent)->sector)  in kcryptd_crypt_write_io_submit()
    [all …]
|
/drivers/usb/storage/ |
D | jumpshot.c |
    164  u32 sector,  in jumpshot_read_data() argument
    180  if (sector > 0x0FFFFFFF)  in jumpshot_read_data()
    202  command[2] = sector & 0xFF;  in jumpshot_read_data()
    203  command[3] = (sector >> 8) & 0xFF;  in jumpshot_read_data()
    204  command[4] = (sector >> 16) & 0xFF;  in jumpshot_read_data()
    206  command[5] = 0xE0 | ((sector >> 24) & 0x0F);  in jumpshot_read_data()
    226  sector += thistime;  in jumpshot_read_data()
    241  u32 sector,  in jumpshot_write_data() argument
    257  if (sector > 0x0FFFFFFF)  in jumpshot_write_data()
    284  command[2] = sector & 0xFF;  in jumpshot_write_data()
    [all …]
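jumpshot, datafab, and shuttle_usbat (below) all pack a 28-bit LBA into the ATA taskfile bytes of their command block: the low 24 bits into bytes 2..4 and the top nibble into the device/head byte, ORed with 0xE0 (LBA mode, master device), after rejecting anything above 0x0FFFFFFF. A small sketch of just that packing; the opcode and sector-count bytes live elsewhere in the block in the real drivers and are deliberately not set here:

    #include <stdint.h>

    /*
     * Pack a 28-bit LBA into bytes 2..5 of a command block the way the
     * jumpshot/datafab/shuttle_usbat drivers do. Returns -1 if the address
     * does not fit in 28 bits.
     */
    static int pack_lba28(uint8_t command[8], uint32_t sector)
    {
        if (sector > 0x0FFFFFFF)
            return -1;                              /* beyond LBA28 */

        command[2] = sector & 0xFF;
        command[3] = (sector >> 8) & 0xFF;
        command[4] = (sector >> 16) & 0xFF;
        command[5] = 0xE0 | ((sector >> 24) & 0x0F);   /* LBA mode | master | LBA[27:24] */
        return 0;
    }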
|
D | datafab.c |
    145  u32 sector,  in datafab_read_data() argument
    190  command[2] = sector & 0xFF;  in datafab_read_data()
    191  command[3] = (sector >> 8) & 0xFF;  in datafab_read_data()
    192  command[4] = (sector >> 16) & 0xFF;  in datafab_read_data()
    195  command[5] |= (sector >> 24) & 0x0F;  in datafab_read_data()
    213  sector += thistime;  in datafab_read_data()
    228  u32 sector,  in datafab_write_data() argument
    278  command[2] = sector & 0xFF;  in datafab_write_data()
    279  command[3] = (sector >> 8) & 0xFF;  in datafab_write_data()
    280  command[4] = (sector >> 16) & 0xFF;  in datafab_write_data()
    [all …]
|
D | shuttle_usbat.c |
    208   u32 sector, unsigned char cmd)  in usbat_pack_ata_sector_cmd() argument
    212   buf[2] = sector & 0xFF;  in usbat_pack_ata_sector_cmd()
    213   buf[3] = (sector >> 8) & 0xFF;  in usbat_pack_ata_sector_cmd()
    214   buf[4] = (sector >> 16) & 0xFF;  in usbat_pack_ata_sector_cmd()
    215   buf[5] = 0xE0 | ((sector >> 24) & 0x0F);  in usbat_pack_ata_sector_cmd()
    1116  u32 sector,  in usbat_flash_read_data() argument
    1147  if (sector > 0x0FFFFFFF)  in usbat_flash_read_data()
    1172  usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20);  in usbat_flash_read_data()
    1190  sector += thistime;  in usbat_flash_read_data()
    1207  u32 sector,  in usbat_flash_write_data() argument
    [all …]
|
/drivers/mtd/nand/ |
D | sh_flctl.c |
    448  (struct sh_flctl *flctl, uint8_t *buff, int sector)  in read_ecfiforeg() argument
    454  res = wait_recfifo_ready(flctl , sector);  in read_ecfiforeg()
    584  int sector, page_sectors;  in execmd_read_page_sector() local
    600  for (sector = 0; sector < page_sectors; sector++) {  in execmd_read_page_sector()
    601  read_fiforeg(flctl, 512, 512 * sector);  in execmd_read_page_sector()
    604  &flctl->done_buff[mtd->writesize + 16 * sector],  in execmd_read_page_sector()
    605  sector);  in execmd_read_page_sector()
    655  int sector, page_sectors;  in execmd_write_page_sector() local
    668  for (sector = 0; sector < page_sectors; sector++) {  in execmd_write_page_sector()
    669  write_fiforeg(flctl, 512, 512 * sector);  in execmd_write_page_sector()
    [all …]
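execmd_read_page_sector() walks the NAND page 512 data bytes at a time and keeps a 16-byte ECC/OOB chunk per sector after the main area: data for sector k sits at offset 512*k in the buffer, its OOB at writesize + 16*k. A short sketch of that offset arithmetic; the 2 KiB page size is an assumption for the example, and the buffer itself is hypothetical:

    #include <stdio.h>

    #define SECTOR_DATA  512   /* main data bytes handled per loop iteration */
    #define SECTOR_OOB    16   /* OOB/ECC bytes stored per 512-byte sector */

    int main(void)
    {
        unsigned writesize = 2048;                     /* assumed 2 KiB NAND page */
        unsigned page_sectors = writesize / SECTOR_DATA;

        for (unsigned sector = 0; sector < page_sectors; sector++) {
            unsigned data_off = SECTOR_DATA * sector;            /* like read_fiforeg(..., 512 * sector) */
            unsigned oob_off  = writesize + SECTOR_OOB * sector; /* like done_buff[writesize + 16 * sector] */

            printf("sector %u: data @ %u, oob @ %u\n", sector, data_off, oob_off);
        }
        return 0;
    }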
|
D | atmel_nand_ecc.h |
    120  #define pmecc_readb_ecc_relaxed(addr, sector, n) \  argument
    121  readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
    123  #define pmecc_readl_rem_relaxed(addr, sector, n) \  argument
    124  readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
|
/drivers/char/ |
D | ps3flash.c |
    134  u64 size, sector, offset;  in ps3flash_read() local
    154  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
    164  res = ps3flash_fetch(dev, sector);  in ps3flash_read()
    187  sector += priv->chunk_sectors;  in ps3flash_read()
    203  u64 size, sector, offset;  in ps3flash_write() local
    223  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
    234  res = ps3flash_fetch(dev, sector);  in ps3flash_write()
    235  else if (sector != priv->tag)  in ps3flash_write()
    255  priv->tag = sector;  in ps3flash_write()
    262  sector += priv->chunk_sectors;  in ps3flash_write()
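ps3flash_read()/write() convert the file position into the first sector of the bounce-buffer-sized chunk containing it: truncating division by the bounce size, then scaling by the sectors per chunk, stepping by chunk_sectors on each iteration. A tiny sketch of that conversion; the 256 KiB bounce size is an assumed value chosen only for the example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bounce_size   = 256 * 1024;             /* assumed chunk size in bytes */
        uint64_t chunk_sectors = bounce_size / 512;      /* sectors per chunk */
        uint64_t pos           = 1000000;                /* file offset being accessed */

        uint64_t sector = pos / bounce_size * chunk_sectors;  /* first sector of the chunk */
        uint64_t offset = pos % bounce_size;                  /* byte offset inside the chunk */

        printf("pos %llu -> chunk starts at sector %llu, offset %llu\n",
               (unsigned long long)pos, (unsigned long long)sector,
               (unsigned long long)offset);
        return 0;
    }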
|
/drivers/mtd/devices/ |
D | docg3.c |
    409  static void doc_setup_addr_sector(struct docg3 *docg3, int sector)  in doc_setup_addr_sector() argument
    412  doc_flash_address(docg3, sector & 0xff);  in doc_setup_addr_sector()
    413  doc_flash_address(docg3, (sector >> 8) & 0xff);  in doc_setup_addr_sector()
    414  doc_flash_address(docg3, (sector >> 16) & 0xff);  in doc_setup_addr_sector()
    424  static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)  in doc_setup_writeaddr_sector() argument
    429  doc_flash_address(docg3, sector & 0xff);  in doc_setup_writeaddr_sector()
    430  doc_flash_address(docg3, (sector >> 8) & 0xff);  in doc_setup_writeaddr_sector()
    431  doc_flash_address(docg3, (sector >> 16) & 0xff);  in doc_setup_writeaddr_sector()
    450  int sector, ret = 0;  in doc_read_seek() local
    472  sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);  in doc_read_seek()
    [all …]
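doc_setup_addr_sector() sends the sector address to the chip one byte at a time, least-significant byte first, and doc_read_seek() builds that address from a block number and a page number with a shift and a mask. A sketch of both steps; the identifiers are taken from the listing, but their numeric values (6 and 0x3f) are assumptions made for the example:

    #include <stdio.h>
    #include <stdint.h>

    #define DOC_ADDR_BLOCK_SHIFT  6       /* assumed: 64 pages per block */
    #define DOC_ADDR_PAGE_MASK    0x3f    /* assumed to match the shift */

    /* Stand-in for doc_flash_address(): just show each byte that would be sent. */
    static void flash_address_byte(uint8_t b)
    {
        printf("address byte: 0x%02x\n", b);
    }

    int main(void)
    {
        int block0 = 17, page = 5;

        /* Compose the sector address as doc_read_seek() does. */
        int sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);

        /* Send it low byte first, as doc_setup_addr_sector() does. */
        flash_address_byte(sector & 0xff);
        flash_address_byte((sector >> 8) & 0xff);
        flash_address_byte((sector >> 16) & 0xff);
        return 0;
    }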
|
/drivers/mtd/ |
D | rfd_ftl.c |
    90   static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
    241  static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)  in rfd_ftl_readsect() argument
    248  if (sector >= part->sector_count)  in rfd_ftl_readsect()
    251  addr = part->sector_map[sector];  in rfd_ftl_readsect()
    640  static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)  in do_writesect() argument
    682  part->sector_map[sector] = addr;  in do_writesect()
    684  entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);  in do_writesect()
    708  static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)  in rfd_ftl_writesect() argument
    715  pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);  in rfd_ftl_writesect()
    722  if (sector >= part->sector_count) {  in rfd_ftl_writesect()
    [all …]
|
D | ftl.c |
    789  u_long sector, u_long nblocks)  in ftl_read() argument
    797  part, sector, nblocks);  in ftl_read()
    805  if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {  in ftl_read()
    809  log_addr = part->VirtualBlockMap[sector+i];  in ftl_read()
    903  u_long sector, u_long nblocks)  in ftl_write() argument
    911  part, sector, nblocks);  in ftl_write()
    925  virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;  in ftl_write()
    962  old_addr = part->VirtualBlockMap[sector+i];  in ftl_write()
    964  part->VirtualBlockMap[sector+i] = 0xffffffff;  in ftl_write()
    973  part->VirtualBlockMap[sector+i] = log_addr;  in ftl_write()
    [all …]
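ftl_read() resolves each 512-byte sector through VirtualBlockMap[] to a logical address, with 0xffffffff meaning "never written, read back zeroes", and ftl_write() computes the sector's virtual address by scaling to bytes and ORing in a data-block tag. A compact sketch of that lookup with the map held in a plain array; the BLOCK_DATA value and the flat `media` buffer are assumptions of this example, not ftl.c's layout:

    #include <stdint.h>
    #include <string.h>

    #define SECTOR_SIZE  512
    #define BLOCK_DATA   0x01          /* assumed tag for "data block" virtual addresses */
    #define UNWRITTEN    0xffffffff    /* map entry for a sector never written */

    /* Read one sector through a simple FTL-style virtual block map:
     * unmapped sectors read back as zeroes, mapped ones come from 'media'
     * at the logical address stored in the map. */
    static void ftl_read_sector(const uint32_t *virtual_block_map,
                                const uint8_t *media, unsigned long sector,
                                uint8_t buf[SECTOR_SIZE])
    {
        uint32_t log_addr = virtual_block_map[sector];

        if (log_addr == UNWRITTEN)
            memset(buf, 0, SECTOR_SIZE);
        else
            memcpy(buf, media + log_addr, SECTOR_SIZE);
    }

    /* Writes record the sector's virtual address tagged as data, e.g.: */
    static uint32_t ftl_virt_addr(unsigned long sector)
    {
        return (uint32_t)(sector * SECTOR_SIZE) | BLOCK_DATA;
    }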
|