Searched refs:sector (Results 1 – 25 of 118) sorted by relevance


/drivers/block/drbd/
drbd_interval.c
16 #define NODE_END(node) ((node)->sector + ((node)->size >> 9))
28 sector_t this_end = this->sector + (this->size >> 9); in drbd_insert_interval()
39 if (this->sector < here->sector) in drbd_insert_interval()
41 else if (this->sector > here->sector) in drbd_insert_interval()
69 drbd_contains_interval(struct rb_root *root, sector_t sector, in drbd_contains_interval() argument
78 if (sector < here->sector) in drbd_contains_interval()
80 else if (sector > here->sector) in drbd_contains_interval()
114 drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size) in drbd_find_overlap() argument
118 sector_t end = sector + (size >> 9); in drbd_find_overlap()
127 sector < interval_end(node->rb_left)) { in drbd_find_overlap()
[all …]
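
Every drbd_interval.c hit above leans on one convention: an interval starts at a 512-byte sector and stores its length in bytes, so its end sector is sector + (size >> 9). A minimal userspace sketch of that arithmetic and the resulting overlap test (types and names are illustrative, not DRBD's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct interval {
	sector_t sector;    /* start, in 512-byte sectors */
	unsigned int size;  /* length, in bytes */
};

/* Exclusive end sector: a byte count becomes sectors via >> 9 (512 = 2^9). */
static sector_t interval_end(const struct interval *i)
{
	return i->sector + (i->size >> 9);
}

/* Two half-open sector ranges overlap iff each starts before the other ends. */
static bool intervals_overlap(const struct interval *a, const struct interval *b)
{
	return a->sector < interval_end(b) && b->sector < interval_end(a);
}

int main(void)
{
	struct interval a = { .sector = 0, .size = 4096 }; /* sectors 0..7 */
	struct interval b = { .sector = 8, .size = 4096 }; /* sectors 8..15 */

	printf("overlap: %d\n", intervals_overlap(&a, &b)); /* 0: adjacent */
	return 0;
}
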
drbd_actlog.c
127 sector_t sector, enum req_op op) in _drbd_md_sync_page_io() argument
144 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
178 sector_t sector, enum req_op op) in drbd_md_sync_page_io() argument
187 (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ", in drbd_md_sync_page_io()
190 if (sector < drbd_md_first_sector(bdev) || in drbd_md_sync_page_io()
191 sector + 7 > drbd_md_last_sector(bdev)) in drbd_md_sync_page_io()
194 (unsigned long long)sector, in drbd_md_sync_page_io()
197 err = _drbd_md_sync_page_io(device, bdev, sector, op); in drbd_md_sync_page_io()
200 (unsigned long long)sector, in drbd_md_sync_page_io()
245 unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); in drbd_al_begin_io_fastpath()
[all …]
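
The sector + 7 test in drbd_md_sync_page_io() is a bounds check for a 4 KiB metadata page, which occupies exactly 8 sectors. A hedged sketch of the same check (the window limits here are invented placeholders):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Illustrative metadata window; DRBD derives the real one from the on-disk
   layout. */
static const sector_t md_first_sector = 1024;
static const sector_t md_last_sector = 1024 + 8191;

/* A 4 KiB metadata page covers 8 sectors, so its last sector is sector + 7. */
static bool md_page_in_window(sector_t sector)
{
	return sector >= md_first_sector && sector + 7 <= md_last_sector;
}

int main(void)
{
	printf("%d %d\n", md_page_in_window(1024), md_page_in_window(9210));
	return 0; /* prints: 1 0 */
}
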
drbd_worker.c
127 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
157 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final()
183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
356 sector_t sector = peer_req->i.sector; in w_e_send_csum() local
367 err = drbd_send_drequest_csum(peer_device, sector, size, in w_e_send_csum()
387 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) in read_for_csum() argument
397 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
588 sector_t sector; in make_resync_request() local
654 sector = BM_BIT_TO_SECT(bit); in make_resync_request()
656 if (drbd_try_rs_begin_io(device, sector)) { in make_resync_request()
[all …]
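
make_resync_request() walks the out-of-sync bitmap and converts each set bit back to a sector with BM_BIT_TO_SECT(). Assuming DRBD's 4 KiB-per-bit resync granularity, that conversion is a shift by 3 (12 - 9); a small sketch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Assuming 4 KiB granularity: one bitmap bit per 2^12 bytes, i.e. 8 sectors
   per bit, so the conversion is a shift by BM_BLOCK_SHIFT - 9 = 3. */
#define BM_BLOCK_SHIFT 12
#define BM_BIT_TO_SECT(bit) ((sector_t)(bit) << (BM_BLOCK_SHIFT - 9))
#define BM_SECT_TO_BIT(sect) ((unsigned long)((sect) >> (BM_BLOCK_SHIFT - 9)))

int main(void)
{
	unsigned long bit = 1000;
	sector_t sector = BM_BIT_TO_SECT(bit); /* 8000 */

	printf("bit %lu -> sector %llu -> bit %lu\n",
	       bit, (unsigned long long)sector, BM_SECT_TO_BIT(sector));
	return 0;
}
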
drbd_receiver.c
361 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in drbd_alloc_peer_req() argument
390 peer_req->i.sector = sector; in drbd_alloc_peer_req()
1600 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, in drbd_issue_peer_discard_or_zero_out()
1629 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local
1672 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1685 sector += len >> 9; in drbd_submit_peer_request()
1825 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument
1875 if (sector + (ds>>9) > capacity) { in read_in_block()
1879 (unsigned long long)sector, ds); in read_in_block()
1886 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); in read_in_block()
[all …]
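
read_in_block() rejects a request whose byte length, converted to sectors with ds >> 9, would run past the device, and drbd_submit_peer_request() advances its sector cursor by len >> 9 as it splits a request across bios. A compact sketch of both steps (the 4 KiB piece size is invented for illustration):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	sector_t capacity = 1ull << 20; /* device size in sectors (made up) */
	sector_t sector = 2048;         /* request start */
	unsigned int ds = 3 * 4096;     /* request length in bytes */

	/* Reject requests that would run past the device, as read_in_block()
	   does. */
	if (sector + (ds >> 9) > capacity)
		return 1;

	/* Split into pieces, advancing the sector cursor by len >> 9 each
	   time. */
	for (unsigned int left = ds; left; ) {
		unsigned int len = left < 4096 ? left : 4096;

		printf("submit: sector %llu, %u bytes\n",
		       (unsigned long long)sector, len);
		sector += len >> 9;
		left -= len;
	}
	return 0;
}
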
drbd_interval.h
10 sector_t sector; /* start sector of the interval */ member
38 #define drbd_for_each_overlap(i, root, sector, size) \ argument
39 for (i = drbd_find_overlap(root, sector, size); \
41 i = drbd_next_overlap(i, sector, size))
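
drbd_for_each_overlap() follows the standard kernel iterator-macro shape: seed with the first match, loop while non-NULL, step with a next function. The same shape in miniature, with linear array scans standing in for the interval-tree lookups:

#include <stdio.h>

struct ival { unsigned long sector, end; };

/* Linear scans standing in for the interval-tree lookups. */
static struct ival *find_overlap(struct ival *v, int n,
				 unsigned long s, unsigned long e)
{
	for (int i = 0; i < n; i++)
		if (v[i].sector < e && s < v[i].end)
			return &v[i];
	return NULL;
}

static struct ival *next_overlap(struct ival *v, int n, struct ival *cur,
				 unsigned long s, unsigned long e)
{
	int used = (int)(cur - v) + 1;

	return find_overlap(cur + 1, n - used, s, e);
}

/* Same shape as drbd_for_each_overlap(): seed, NULL-test, step. */
#define for_each_overlap(i, v, n, s, e) \
	for (i = find_overlap(v, n, s, e); i; i = next_overlap(v, n, i, s, e))

int main(void)
{
	struct ival v[] = { { 0, 8 }, { 8, 16 }, { 12, 24 } };
	struct ival *i;

	for_each_overlap(i, v, 3, 10, 14)
		printf("overlap: [%lu, %lu)\n", i->sector, i->end);
	return 0;
}
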
/drivers/block/
brd.c
54 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page() argument
71 idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ in brd_lookup_page()
83 static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) in brd_insert_page() argument
89 page = brd_lookup_page(brd, sector); in brd_insert_page()
103 idx = sector >> PAGE_SECTORS_SHIFT; in brd_insert_page()
167 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n, in copy_to_brd_setup() argument
170 unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; in copy_to_brd_setup()
175 ret = brd_insert_page(brd, sector, gfp); in copy_to_brd_setup()
179 sector += copy >> SECTOR_SHIFT; in copy_to_brd_setup()
180 ret = brd_insert_page(brd, sector, gfp); in copy_to_brd_setup()
[all …]
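
brd.c maps a sector to its backing page with one shift and finds the byte offset inside that page with a mask-and-shift. The arithmetic, assuming 4 KiB pages (so PAGE_SECTORS_SHIFT is 3):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define SECTOR_SHIFT       9
#define PAGE_SHIFT         12 /* assuming 4 KiB pages */
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS       (1u << PAGE_SECTORS_SHIFT)

int main(void)
{
	sector_t sector = 21;
	uint64_t idx = sector >> PAGE_SECTORS_SHIFT; /* backing page index */
	/* byte offset of this sector within that page */
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;

	printf("sector %llu -> page %llu, offset %u\n",
	       (unsigned long long)sector, (unsigned long long)idx, offset);
	return 0; /* sector 21 -> page 2, offset 2560 */
}
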
/drivers/block/null_blk/
zoned.c
65 sector_t sector = 0; in null_init_zoned_dev() local
127 zone->start = sector; in null_init_zoned_dev()
134 sector += dev->zone_size_sects; in null_init_zoned_dev()
141 zone->start = zone->wp = sector; in null_init_zoned_dev()
151 sector += dev->zone_size_sects; in null_init_zoned_dev()
189 int null_report_zones(struct gendisk *disk, sector_t sector, in null_report_zones() argument
199 first_zone = null_zone_no(dev, sector); in null_report_zones()
237 sector_t sector, unsigned int len) in null_zone_valid_read_len() argument
240 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)]; in null_zone_valid_read_len()
245 sector + nr_sectors <= zone->wp) in null_zone_valid_read_len()
[all …]
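
null_zone_no() turns an absolute sector into a zone index, and null_zone_valid_read_len() refuses reads beyond a sequential zone's write pointer. A sketch of both, with a made-up zone size and the zone-number step written as a plain division:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	/* An arbitrary example: 256 MiB zones, expressed in sectors. */
	const sector_t zone_size_sects = (256ull << 20) >> 9;

	sector_t sector = (768ull << 20) >> 9;       /* some absolute sector */
	sector_t zone_no = sector / zone_size_sects; /* which zone it falls in */
	sector_t zone_start = zone_no * zone_size_sects;

	/* Sequential zones only hold data up to the write pointer; reads are
	   clamped against it, as in null_zone_valid_read_len(). */
	sector_t wp = zone_start + 1024; /* pretend write pointer */
	unsigned int nr_sectors = 8;
	int read_ok = sector + nr_sectors <= wp;

	printf("zone %llu (starts at %llu), read-ok %d\n",
	       (unsigned long long)zone_no, (unsigned long long)zone_start,
	       read_ok);
	return 0;
}
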
null_blk.h
140 blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
143 sector_t sector, unsigned int nr_sectors);
149 int null_report_zones(struct gendisk *disk, sector_t sector,
152 sector_t sector, sector_t nr_sectors);
154 sector_t sector, unsigned int len);
168 enum req_op op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd() argument
173 sector_t sector, in null_zone_valid_read_len() argument
main.c
855 static void null_free_sector(struct nullb *nullb, sector_t sector, in null_free_sector() argument
864 idx = sector >> PAGE_SECTORS_SHIFT; in null_free_sector()
865 sector_bit = (sector & SECTOR_MASK); in null_free_sector()
928 sector_t sector, bool for_write, bool is_cache) in __null_lookup_page() argument
935 idx = sector >> PAGE_SECTORS_SHIFT; in __null_lookup_page()
936 sector_bit = (sector & SECTOR_MASK); in __null_lookup_page()
949 sector_t sector, bool for_write, bool ignore_cache) in null_lookup_page() argument
954 page = __null_lookup_page(nullb, sector, for_write, true); in null_lookup_page()
957 return __null_lookup_page(nullb, sector, for_write, false); in null_lookup_page()
961 sector_t sector, bool ignore_cache) in null_insert_page() argument
[all …]
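
null_blk's cache pages carry a per-sector validity mask: sector & SECTOR_MASK picks the bit for a sector within its page. A toy version of that bookkeeping (the struct layout is invented, not null_blk's):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT       9
#define PAGE_SECTORS_SHIFT 3 /* 4 KiB pages: 8 sectors each */
#define PAGE_SECTORS       (1u << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK        (PAGE_SECTORS - 1)

/* A cached page plus a mask of which of its 8 sectors hold valid data. */
struct cached_page {
	uint8_t data[PAGE_SECTORS << SECTOR_SHIFT];
	uint8_t valid; /* bit n set = sector n of this page is valid */
};

static void mark_valid(struct cached_page *p, uint64_t sector)
{
	p->valid |= 1u << (sector & SECTOR_MASK);
}

static int is_valid(const struct cached_page *p, uint64_t sector)
{
	return !!(p->valid & (1u << (sector & SECTOR_MASK)));
}

int main(void)
{
	struct cached_page p = { .valid = 0 };

	mark_valid(&p, 21); /* sector 21 lands in bit 5 of its page's mask */
	printf("%d %d\n", is_valid(&p, 21), is_valid(&p, 22));
	return 0; /* prints: 1 0 */
}
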
/drivers/scsi/
sr_vendor.c
174 unsigned long sector; in sr_cd_check() local
186 sector = 0; /* the multisession sector offset goes here */ in sr_cd_check()
212 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check()
216 sector = 0; in sr_cd_check()
243 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check()
271 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check()
272 if (sector) in sr_cd_check()
273 sector -= CD_MSF_OFFSET; in sr_cd_check()
309 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check()
318 sector = 0; in sr_cd_check()
[all …]
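
sr_cd_check() repeatedly converts CD minute/second/frame (MSF) addresses to logical sectors with min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame, then subtracts the 150-frame MSF offset when the result is non-zero. The same conversion as a standalone helper:

#include <stdio.h>

/* CD addressing constants, as in <linux/cdrom.h>. */
#define CD_SECS       60  /* seconds per minute */
#define CD_FRAMES     75  /* frames (sectors) per second */
#define CD_MSF_OFFSET 150 /* MSF 00:02:00 corresponds to LBA 0 */

/* Minute/second/frame address -> logical sector, as sr_cd_check() computes
   it. */
static long msf_to_lba(int min, int sec, int frame)
{
	long sector = (long)min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;

	return sector ? sector - CD_MSF_OFFSET : 0;
}

int main(void)
{
	printf("%ld %ld\n", msf_to_lba(0, 2, 0), msf_to_lba(4, 30, 10));
	return 0; /* prints: 0 20110 */
}
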
/drivers/vdpa/vdpa_sim/
vdpa_sim_blk.c
87 u64 sector; in vdpasim_blk_handle_req() local
125 sector = vdpasim64_to_cpu(vdpasim, hdr.sector); in vdpasim_blk_handle_req()
126 offset = sector << SECTOR_SHIFT; in vdpasim_blk_handle_req()
130 sector != 0) { in vdpasim_blk_handle_req()
133 type, sector); in vdpasim_blk_handle_req()
140 if (!vdpasim_blk_check_range(vdpasim, sector, in vdpasim_blk_handle_req()
162 if (!vdpasim_blk_check_range(vdpasim, sector, in vdpasim_blk_handle_req()
222 sector = le64_to_cpu(range.sector); in vdpasim_blk_handle_req()
223 offset = sector << SECTOR_SHIFT; in vdpasim_blk_handle_req()
244 if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors, in vdpasim_blk_handle_req()
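
The simulator converts the request's sector to a byte offset with sector << SECTOR_SHIFT and validates the range first. A sketch of an overflow-safe range check of the kind vdpasim_blk_check_range() performs (the capacity is a made-up constant):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Illustrative capacity in sectors; the simulator gets the real one from its
   device config. */
#define DEV_CAPACITY 0x40000ull /* 128 MiB */

/* Overflow-safe: subtract instead of computing sector + num_sectors. */
static bool blk_check_range(uint64_t sector, uint64_t num_sectors)
{
	return sector <= DEV_CAPACITY && num_sectors <= DEV_CAPACITY - sector;
}

int main(void)
{
	uint64_t sector = 16;
	uint64_t offset = sector << SECTOR_SHIFT; /* byte offset: 8192 */

	printf("offset %llu, in-range %d, bogus %d\n",
	       (unsigned long long)offset,
	       blk_check_range(sector, 8),
	       blk_check_range(UINT64_MAX, 1));
	return 0;
}
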
/drivers/usb/storage/
jumpshot.c
155 u32 sector, in jumpshot_read_data() argument
171 if (sector > 0x0FFFFFFF) in jumpshot_read_data()
193 command[2] = sector & 0xFF; in jumpshot_read_data()
194 command[3] = (sector >> 8) & 0xFF; in jumpshot_read_data()
195 command[4] = (sector >> 16) & 0xFF; in jumpshot_read_data()
197 command[5] = 0xE0 | ((sector >> 24) & 0x0F); in jumpshot_read_data()
217 sector += thistime; in jumpshot_read_data()
232 u32 sector, in jumpshot_write_data() argument
248 if (sector > 0x0FFFFFFF) in jumpshot_write_data()
275 command[2] = sector & 0xFF; in jumpshot_write_data()
[all …]
datafab.c
137 u32 sector, in datafab_read_data() argument
182 command[2] = sector & 0xFF; in datafab_read_data()
183 command[3] = (sector >> 8) & 0xFF; in datafab_read_data()
184 command[4] = (sector >> 16) & 0xFF; in datafab_read_data()
187 command[5] |= (sector >> 24) & 0x0F; in datafab_read_data()
205 sector += thistime; in datafab_read_data()
220 u32 sector, in datafab_write_data() argument
270 command[2] = sector & 0xFF; in datafab_write_data()
271 command[3] = (sector >> 8) & 0xFF; in datafab_write_data()
272 command[4] = (sector >> 16) & 0xFF; in datafab_write_data()
[all …]
shuttle_usbat.c
200 u32 sector, unsigned char cmd) in usbat_pack_ata_sector_cmd() argument
204 buf[2] = sector & 0xFF; in usbat_pack_ata_sector_cmd()
205 buf[3] = (sector >> 8) & 0xFF; in usbat_pack_ata_sector_cmd()
206 buf[4] = (sector >> 16) & 0xFF; in usbat_pack_ata_sector_cmd()
207 buf[5] = 0xE0 | ((sector >> 24) & 0x0F); in usbat_pack_ata_sector_cmd()
1109 u32 sector, in usbat_flash_read_data() argument
1140 if (sector > 0x0FFFFFFF) in usbat_flash_read_data()
1165 usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20); in usbat_flash_read_data()
1183 sector += thistime; in usbat_flash_read_data()
1200 u32 sector, in usbat_flash_write_data() argument
[all …]
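
jumpshot.c, datafab.c, and shuttle_usbat.c all pack the sector number the same way: LBA28, with the low three bytes in separate command bytes and the top nibble OR'd into 0xE0, which is why each driver rejects sectors above 0x0FFFFFFF. A standalone sketch of that packing (the command-buffer layout varies per driver; this one is illustrative):

#include <stdint.h>
#include <stdio.h>

/* LBA28: the 28-bit sector number spans four ATA taskfile bytes; the top
   nibble shares the device byte with the 0xE0 (LBA-mode) bits. */
static void pack_lba28(uint8_t cmd[8], uint32_t sector, uint8_t count,
		       uint8_t opcode)
{
	cmd[1] = count;                          /* sectors to transfer */
	cmd[2] = sector & 0xFF;                  /* LBA low */
	cmd[3] = (sector >> 8) & 0xFF;           /* LBA mid */
	cmd[4] = (sector >> 16) & 0xFF;          /* LBA high */
	cmd[5] = 0xE0 | ((sector >> 24) & 0x0F); /* device + LBA top nibble */
	cmd[6] = opcode;                         /* e.g. 0x20 read, 0x30 write */
}

int main(void)
{
	uint8_t cmd[8] = { 0 };

	pack_lba28(cmd, 0x0ABCDE1, 8, 0x20);
	printf("%02X %02X %02X %02X\n", cmd[2], cmd[3], cmd[4], cmd[5]);
	return 0; /* prints: E1 CD AB E0 */
}
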
/drivers/mtd/
rfd_ftl.c
91 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
240 static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) in rfd_ftl_readsect() argument
247 if (sector >= part->sector_count) in rfd_ftl_readsect()
250 addr = part->sector_map[sector]; in rfd_ftl_readsect()
601 static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr) in do_writesect() argument
642 part->sector_map[sector] = addr; in do_writesect()
644 entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector); in do_writesect()
667 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) in rfd_ftl_writesect() argument
674 pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector); in rfd_ftl_writesect()
681 if (sector >= part->sector_count) { in rfd_ftl_writesect()
[all …]
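
rfd_ftl.c resolves reads through a logical-sector-to-flash-address table and remaps on every write; sector 0 is stored under a SECTOR_ZERO sentinel so it cannot be confused with a blank (0xffff) map entry. A toy map illustrating the lookup/remap split (sizes and the unmapped marker are invented):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_COUNT 1024
#define ADDR_NONE ((uint32_t)-1)

/* Logical-sector -> flash-address map, RFD style: reads go through the map,
   writes allocate a fresh spot and remap. */
static uint32_t sector_map[SECTOR_COUNT];

static uint32_t map_lookup(unsigned long sector)
{
	return sector < SECTOR_COUNT ? sector_map[sector] : ADDR_NONE;
}

static int map_write(unsigned long sector, uint32_t flash_addr)
{
	if (sector >= SECTOR_COUNT)
		return -1;
	sector_map[sector] = flash_addr; /* the new copy supersedes the old */
	return 0;
}

int main(void)
{
	for (int i = 0; i < SECTOR_COUNT; i++)
		sector_map[i] = ADDR_NONE;

	map_write(7, 0x2000);
	printf("sector 7 -> 0x%x, sector 8 mapped: %d\n",
	       (unsigned)map_lookup(7), map_lookup(8) != ADDR_NONE);
	return 0;
}
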
/drivers/md/
dm-log-writes.c
96 __le64 sector; member
125 sector_t sector; member
213 sector_t sector) in write_metadata() argument
222 bio->bi_iter.bi_sector = sector; in write_metadata()
223 bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? in write_metadata()
259 sector_t sector) in write_inline_data() argument
275 bio->bi_iter.bi_sector = sector; in write_inline_data()
307 sector += bio_pages * PAGE_SECTORS; in write_inline_data()
318 struct pending_block *block, sector_t sector) in log_one_block() argument
325 entry.sector = cpu_to_le64(block->sector); in log_one_block()
[all …]
raid0.c
305 sector_t sector = *sectorp; in find_zone() local
308 if (sector < z[i].zone_end) { in find_zone()
310 *sectorp = sector - z[i-1].zone_end; in find_zone()
321 sector_t sector, sector_t *sector_offset) in map_sector() argument
332 sect_in_chunk = sector & (chunk_sects - 1); in map_sector()
333 sector >>= chunksect_bits; in map_sector()
339 sect_in_chunk = sector_div(sector, chunk_sects); in map_sector()
350 + sector_div(sector, zone->nb_dev)]; in map_sector()
566 sector_t sector = bio_sector; in raid0_map_submit_bio() local
570 zone = find_zone(mddev->private, &sector); in raid0_map_submit_bio()
[all …]
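
raid0's map_sector() splits a sector into chunk offset, chunk number, and target device: mask for the offset when the chunk size is a power of two (shift for the chunk number), then divide the chunk number across the disks. Worked through in userspace with made-up geometry:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	/* Made-up geometry: 128-sector (64 KiB) chunks striped over 4 disks. */
	const unsigned int chunk_sects = 128, nb_dev = 4;
	const unsigned int chunksect_bits = 7; /* log2(chunk_sects) */

	sector_t sector = 5000;
	sector_t sect_in_chunk = sector & (chunk_sects - 1); /* offset in chunk */
	sector_t chunk = sector >> chunksect_bits;           /* global chunk no. */
	unsigned int dev = chunk % nb_dev;      /* disk holding this chunk */
	sector_t chunk_on_dev = chunk / nb_dev; /* chunk index on that disk */

	printf("sector %llu -> dev %u, sector %llu\n",
	       (unsigned long long)sector, dev,
	       (unsigned long long)(chunk_on_dev * chunk_sects + sect_in_chunk));
	return 0; /* sector 5000 -> dev 3, sector 1160 */
}
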
raid5.c
426 (unsigned long long)sh->sector); in remove_hash()
433 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
436 (unsigned long long)sh->sector); in insert_hash()
575 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
586 (unsigned long long)sector); in init_stripe()
591 sh->sector = sector; in init_stripe()
592 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
601 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
607 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
617 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
[all …]
raid5-ppl.c
166 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_partial_parity()
277 pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector); in ppl_log_stripe()
303 if (!data_disks || dev->sector < data_sector) in ppl_log_stripe()
304 data_sector = dev->sector; in ppl_log_stripe()
328 if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) && in ppl_log_stripe()
461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector < in ppl_submit_iounit()
463 log->next_io_sector = log->rdev->ppl.sector; in ppl_submit_iounit()
859 sector_t sector; in ppl_recover_entry() local
878 sector = raid5_compute_sector(conf, r_sector, 0, in ppl_recover_entry()
883 (unsigned long long)sector); in ppl_recover_entry()
[all …]
raid1.c
64 sector_t lo = r1_bio->sector; in check_and_add_serial()
86 int idx = sector_to_idx(r1_bio->sector); in wait_for_serialization()
264 sector_t sect = r1_bio->sector; in put_buf()
285 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
330 allow_barrier(conf, r1_bio->sector); in raid_end_bio_io()
343 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
408 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
424 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
456 sector_t lo = r1_bio->sector; in raid1_end_write_request()
457 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
[all …]
dm-ebs-target.c
36 static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector) in __sector_to_block() argument
38 return sector >> ec->block_shift; in __sector_to_block()
41 static inline sector_t __block_mod(sector_t sector, unsigned int bs) in __block_mod() argument
43 return sector & (bs - 1); in __block_mod()
144 sector_t block, blocks, sector = bio->bi_iter.bi_sector; in __ebs_discard_bio() local
146 block = __sector_to_block(ec, sector); in __ebs_discard_bio()
153 if (__block_mod(sector, ec->u_bs)) { in __ebs_discard_bio()
168 sector_t blocks, sector = bio->bi_iter.bi_sector; in __ebs_forget_bio() local
172 dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks); in __ebs_forget_bio()
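
dm-ebs emulates a larger block size on top of 512-byte sectors: __sector_to_block() is a shift, and __block_mod() masks against bs - 1 to detect partial head or tail blocks that need read-modify-write. The same two helpers in isolation:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Sector -> emulated-block conversion is a shift... */
static sector_t sector_to_block(sector_t sector, unsigned int block_shift)
{
	return sector >> block_shift;
}

/* ...and a power-of-two modulo flags misaligned heads and tails. */
static sector_t block_mod(sector_t sector, unsigned int bs)
{
	return sector & (bs - 1); /* bs must be a power of two */
}

int main(void)
{
	const unsigned int bs = 8, shift = 3; /* 4 KiB blocks over 512 B sectors */
	sector_t sector = 21;

	printf("block %llu, misaligned head: %s\n",
	       (unsigned long long)sector_to_block(sector, shift),
	       block_mod(sector, bs) ? "yes" : "no");
	return 0; /* block 2, misaligned head: yes */
}
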
/drivers/nvdimm/
pmem.c
58 static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) in to_offset() argument
60 return (sector << SECTOR_SHIFT) + pmem->data_offset; in to_offset()
88 static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) in pmem_clear_bb() argument
92 badblocks_clear(&pmem->bb, sector, blks); in pmem_clear_bb()
166 sector_t sector, unsigned int len) in pmem_do_read() argument
169 phys_addr_t pmem_off = to_offset(pmem, sector); in pmem_do_read()
172 if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) in pmem_do_read()
182 sector_t sector, unsigned int len) in pmem_do_write() argument
184 phys_addr_t pmem_off = to_offset(pmem, sector); in pmem_do_write()
187 if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) { in pmem_do_write()
[all …]
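
pmem.c turns a sector into a media offset with (sector << SECTOR_SHIFT) + data_offset and consults a badblocks list before every transfer: bad reads fail, while writes clear the range and proceed. A simplified stand-in for that overlap test (a single hard-coded bad range instead of the badblocks list):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define SECTOR_SHIFT 9

/* One hard-coded bad range; the badblocks core keeps a sorted list. */
static const sector_t bb_start = 100, bb_len = 4;

/* Does an I/O of len bytes at sector touch the bad region? A simplified
   stand-in for is_bad_pmem(). */
static bool is_bad(sector_t sector, unsigned int len)
{
	sector_t end = sector + (len >> SECTOR_SHIFT);

	return sector < bb_start + bb_len && bb_start < end;
}

int main(void)
{
	printf("%d %d\n", is_bad(96, 4096), is_bad(104, 4096));
	return 0; /* prints: 1 0 */
}
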
/drivers/char/
ps3flash.c
98 u64 size, sector, offset; in ps3flash_read() local
118 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_read()
128 res = ps3flash_fetch(dev, sector); in ps3flash_read()
151 sector += priv->chunk_sectors; in ps3flash_read()
167 u64 size, sector, offset; in ps3flash_write() local
187 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_write()
198 res = ps3flash_fetch(dev, sector); in ps3flash_write()
199 else if (sector != priv->tag) in ps3flash_write()
219 priv->tag = sector; in ps3flash_write()
226 sector += priv->chunk_sectors; in ps3flash_write()
/drivers/mtd/nand/raw/
sh_flctl.c
485 (struct sh_flctl *flctl, uint8_t *buff, int sector) in read_ecfiforeg() argument
491 res = wait_recfifo_ready(flctl , sector); in read_ecfiforeg()
625 int sector, page_sectors; in execmd_read_page_sector() local
641 for (sector = 0; sector < page_sectors; sector++) { in execmd_read_page_sector()
642 read_fiforeg(flctl, 512, 512 * sector); in execmd_read_page_sector()
645 &flctl->done_buff[mtd->writesize + 16 * sector], in execmd_read_page_sector()
646 sector); in execmd_read_page_sector()
696 int sector, page_sectors; in execmd_write_page_sector() local
709 for (sector = 0; sector < page_sectors; sector++) { in execmd_write_page_sector()
710 write_fiforeg(flctl, 512, 512 * sector); in execmd_write_page_sector()
[all …]
/drivers/mtd/devices/
docg3.c
418 static void doc_setup_addr_sector(struct docg3 *docg3, int sector) in doc_setup_addr_sector() argument
421 doc_flash_address(docg3, sector & 0xff); in doc_setup_addr_sector()
422 doc_flash_address(docg3, (sector >> 8) & 0xff); in doc_setup_addr_sector()
423 doc_flash_address(docg3, (sector >> 16) & 0xff); in doc_setup_addr_sector()
433 static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs) in doc_setup_writeaddr_sector() argument
438 doc_flash_address(docg3, sector & 0xff); in doc_setup_writeaddr_sector()
439 doc_flash_address(docg3, (sector >> 8) & 0xff); in doc_setup_writeaddr_sector()
440 doc_flash_address(docg3, (sector >> 16) & 0xff); in doc_setup_writeaddr_sector()
459 int sector, ret = 0; in doc_read_seek() local
481 sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); in doc_read_seek()
[all …]
