
Searched refs:sector_t (Results 1 – 25 of 189) sorted by relevance


/drivers/md/
md.h
27 #define MaxSector (~(sector_t)0)
42 sector_t sectors; /* Device size (in 512bytes sectors) */
57 sector_t data_offset; /* start of data in array */
58 sector_t new_data_offset;/* only relevant while reshaping */
59 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
88 sector_t recovery_offset;/* If this device has been partially
125 sector_t sector;
126 sector_t size; /* in sectors */
184 extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
185 sector_t *first_bad, int *bad_sectors);
[all …]
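The md.h hits above show the two conventions the MD layer builds on: positions and sizes are kept in 512-byte sectors, and MaxSector is the all-ones value of sector_t, used as a "no limit" sentinel (for example in recovery_offset once a device is fully recovered). A minimal userspace sketch of that arithmetic, assuming sector_t is a 64-bit unsigned integer as on current kernels (the typedef and the 8 GiB device size are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;          /* userspace stand-in for the kernel type */

    #define MaxSector (~(sector_t)0)    /* all-ones sentinel, as in drivers/md/md.h */

    int main(void)
    {
        uint64_t dev_bytes = 8ULL << 30;        /* hypothetical 8 GiB device */
        sector_t sectors   = dev_bytes >> 9;    /* byte size -> 512-byte sectors */

        printf("sectors   = %llu\n", (unsigned long long)sectors);
        printf("MaxSector = %llu\n", (unsigned long long)MaxSector);
        return 0;
    }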
raid10.h
6 sector_t head_position;
31 sector_t stride; /* distance between far copies.
42 sector_t chunk_mask;
48 sector_t dev_sectors; /* temp copy of
50 sector_t reshape_progress;
51 sector_t reshape_safe;
53 sector_t offset_diff;
65 sector_t next_resync;
96 sector_t sector; /* virtual sector number */
126 sector_t addr;
dm-bufio.h
48 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
55 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
62 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
71 sector_t block, unsigned n_blocks);
109 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
116 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
124 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
125 sector_t dm_bufio_get_block_number(struct dm_buffer *b);
raid1.h
6 sector_t head_position;
11 sector_t next_seq_sect;
12 sector_t seq_start;
42 sector_t next_resync;
53 sector_t start_next_window;
125 sector_t sector;
126 sector_t start_next_window;
bitmap.h
249 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
251 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
253 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
254 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
256 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
261 int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
dm-table.c
27 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
37 sector_t *index[MAX_DEPTH];
41 sector_t *highs;
93 static inline sector_t *get_node(struct dm_table *t, in get_node()
103 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) in high()
109 return (sector_t) - 1; in high()
121 sector_t *node; in setup_btree_index()
157 sector_t *n_highs; in alloc_targets()
165 n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + in alloc_targets()
166 sizeof(sector_t)); in alloc_targets()
[all …]
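dm-table.c keeps, for every target in a table, the highest sector that target covers (the highs array) and builds a small multi-level index over those boundaries (KEYS_PER_NODE keys per node) so a bio's sector can be routed to its target quickly; high() returns (sector_t) - 1 as an out-of-range marker. A simplified userspace sketch of the same lookup, using a flat binary search over the boundaries instead of the kernel's node index (the boundary values are made up):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Return the index of the first target whose "high" boundary is >= sector. */
    static int find_target(const sector_t *highs, int num_targets, sector_t sector)
    {
        int lo = 0, hi = num_targets;

        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (highs[mid] < sector)
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;          /* == num_targets if sector lies past the table end */
    }

    int main(void)
    {
        /* Three targets ending at sectors 999, 4095 and 16383 (illustrative). */
        sector_t highs[] = { 999, 4095, 16383 };

        printf("sector 0     -> target %d\n", find_target(highs, 3, 0));
        printf("sector 1000  -> target %d\n", find_target(highs, 3, 1000));
        printf("sector 16000 -> target %d\n", find_target(highs, 3, 16000));
        return 0;
    }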
dm-verity.h
46 sector_t data_start; /* data offset in 512-byte sectors */
47 sector_t hash_start; /* hash start in blocks */
48 sector_t data_blocks; /* the number of data blocks */
49 sector_t hash_blocks; /* the number of hash blocks */
64 sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
76 sector_t block;
129 sector_t block, u8 *digest, bool *is_zero);
bitmap.c
139 sector_t target; in read_sb_page()
342 sector_t block; in read_page()
830 static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) in bitmap_file_set_bit()
854 static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) in bitmap_file_clear_bit()
913 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
925 static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) in bitmap_init_from_disk()
946 int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) in bitmap_init_from_disk()
949 (sector_t)i << bitmap->counts.chunkshift, in bitmap_init_from_disk()
1025 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift in bitmap_init_from_disk()
1028 (sector_t)i << bitmap->counts.chunkshift, in bitmap_init_from_disk()
[all …]
dm-verity-fec.h
44 sector_t start; /* parity data start in blocks */
45 sector_t blocks; /* number of blocks covered */
46 sector_t rounds; /* number of interleaving rounds */
47 sector_t hash_blocks; /* blocks covered after v->hash_start */
78 enum verity_block_type type, sector_t block,
109 sector_t block, u8 *dest, in verity_fec_decode()
dm-exception-store.h
20 typedef sector_t chunk_t;
107 sector_t *total_sectors, sector_t *sectors_allocated,
108 sector_t *metadata_sectors);
191 static inline sector_t get_dev_size(struct block_device *bdev) in get_dev_size()
197 sector_t sector) in sector_to_chunk()
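dm-exception-store.h aliases chunk_t to sector_t and moves between the two with a shift: the snapshot store works in fixed, power-of-two chunks, so sector_to_chunk() is essentially the sector shifted down by chunk_shift, and get_dev_size() is the backing device's byte size shifted down to sectors. A small sketch of that conversion (the 16-sector, i.e. 8 KiB, chunk size is chosen only for illustration):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;
    typedef sector_t chunk_t;           /* mirrors the typedef in dm-exception-store.h */

    int main(void)
    {
        unsigned chunk_shift = 4;       /* 2^4 = 16 sectors = 8 KiB per chunk */
        sector_t sector = 12345;

        chunk_t  chunk       = sector >> chunk_shift;           /* sector_to_chunk() */
        sector_t chunk_start = (sector_t)chunk << chunk_shift;  /* back to the chunk's first sector */

        printf("sector %llu -> chunk %llu (chunk starts at sector %llu)\n",
               (unsigned long long)sector,
               (unsigned long long)chunk,
               (unsigned long long)chunk_start);
        return 0;
    }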
dm-stripe.c
22 sector_t physical_start;
32 sector_t stripe_width;
98 sector_t width, tmp_len; in stripe_ctr()
211 static void stripe_map_sector(struct stripe_c *sc, sector_t sector, in stripe_map_sector()
212 uint32_t *stripe, sector_t *result) in stripe_map_sector()
214 sector_t chunk = dm_target_offset(sc->ti, sector); in stripe_map_sector()
215 sector_t chunk_offset; in stripe_map_sector()
239 static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, in stripe_map_range_sector()
240 uint32_t target_stripe, sector_t *result) in stripe_map_range_sector()
253 *result = sector & ~(sector_t)(sc->chunk_size - 1); in stripe_map_range_sector()
[all …]
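dm-stripe spreads a target across N backing devices in fixed-size chunks: stripe_map_sector() splits the target-relative sector into a chunk number and an offset within that chunk, picks the stripe as chunk % N, and places the data at (chunk / N) * chunk_size + offset on the chosen device (plus that stripe's physical_start). A userspace sketch of the round-robin arithmetic; the 4-device, 128-sector-chunk layout is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    static void stripe_map(sector_t sector, unsigned stripes, unsigned chunk_size,
                           unsigned *stripe, sector_t *result)
    {
        sector_t chunk        = sector / chunk_size;    /* which chunk, target-wide */
        sector_t chunk_offset = sector % chunk_size;    /* offset inside that chunk */

        *stripe = (unsigned)(chunk % stripes);          /* round-robin device choice */
        *result = (chunk / stripes) * chunk_size + chunk_offset;
    }

    int main(void)
    {
        unsigned stripe;
        sector_t result;

        stripe_map(1000, 4, 128, &stripe, &result);     /* 4 stripes, 128-sector chunks */
        printf("sector 1000 -> stripe %u, device sector %llu\n",
               stripe, (unsigned long long)result);
        return 0;
    }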
raid0.c
53 sector_t zone_size = 0; in dump_zones()
54 sector_t zone_start = 0; in dump_zones()
83 sector_t curr_zone_end, sectors; in create_strip_zones()
305 sector_t *sectorp) in find_zone()
309 sector_t sector = *sectorp; in find_zone()
325 sector_t sector, sector_t *sector_offset) in map_sector()
328 sector_t chunk; in map_sector()
371 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid0_mergeable_bvec()
372 sector_t sector_offset = sector; in raid0_mergeable_bvec()
410 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid0_size()
[all …]
dm-snap-transient.c
23 sector_t next_free;
43 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); in transient_prepare_exception()
64 sector_t *total_sectors, in transient_usage()
65 sector_t *sectors_allocated, in transient_usage()
66 sector_t *metadata_sectors) in transient_usage()
dm-stats.c
44 sector_t start;
45 sector_t end;
46 sector_t step;
57 sector_t last_sector;
197 last->last_sector = (sector_t)ULLONG_MAX; in dm_stats_init()
229 static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, in dm_stats_create()
230 sector_t step, const char *program_id, const char *aux_data, in dm_stats_create()
237 sector_t n_entries; in dm_stats_create()
395 sector_t len; in dm_stats_list()
446 unsigned long bi_rw, sector_t len, bool merged, in dm_stat_for_entry()
[all …]
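dm-stats divides each monitored region [start, end) into areas of step sectors and keeps one set of counters per area; roughly speaking, n_entries is the region length divided by step (rounded up) and an I/O is binned by how far its sector lies past start. A minimal sketch of that bucketing, with made-up region boundaries:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    int main(void)
    {
        sector_t start = 0, end = 2048, step = 100;

        sector_t n_entries = (end - start + step - 1) / step;   /* round up: 21 areas */
        sector_t sector = 1234;
        sector_t area = (sector - start) / step;                /* counter bucket: 12 */

        printf("%llu areas; sector %llu falls in area %llu\n",
               (unsigned long long)n_entries,
               (unsigned long long)sector,
               (unsigned long long)area);
        return 0;
    }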
dm-verity-target.c
45 sector_t block;
78 static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector) in verity_map_sector()
89 static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, in verity_position_at_level()
173 static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level, in verity_hash_at_level()
174 sector_t *hash_block, unsigned *offset) in verity_hash_at_level()
176 sector_t position = verity_position_at_level(v, block, level); in verity_hash_at_level()
254 sector_t block, int level, bool skip_unverified, in verity_verify_level()
261 sector_t hash_block; in verity_verify_level()
313 sector_t block, u8 *digest, bool *is_zero) in verity_hash_for_block()
504 sector_t prefetch_size; in verity_prefetch_io()
[all …]
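dm-verity keeps a Merkle tree of block digests on the hash device; verity_position_at_level() reduces a data block number to its index at a given tree level (position = block >> (level * hash_per_block_bits)), and verity_hash_at_level() turns that into a hash block by shifting once more and adding the level's starting block (hash_level_block[level]), plus a byte offset for the digest inside it. A simplified sketch of the index arithmetic, assuming 128 digests per hash block (4 KiB blocks with 32-byte digests) purely as an example, and omitting the hash_level_block[] base:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    int main(void)
    {
        unsigned hash_per_block_bits = 7;   /* 2^7 = 128 digests per hash block */
        unsigned digest_size = 32;          /* e.g. SHA-256 */
        sector_t block = 1000000;           /* data block whose digest we want */
        int level = 0;                      /* level 0 sits just above the data */

        sector_t position = block >> (level * hash_per_block_bits);
        sector_t hash_block_in_level = position >> hash_per_block_bits;
        unsigned offset = (unsigned)(position & ((1u << hash_per_block_bits) - 1)) * digest_size;

        printf("block %llu, level %d -> hash block %llu of that level, byte offset %u\n",
               (unsigned long long)block, level,
               (unsigned long long)hash_block_in_level, offset);
        return 0;
    }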
linear.c
30 static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) in which_dev()
70 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in linear_mergeable_bvec()
121 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size()
124 sector_t array_sectors; in linear_size()
154 sector_t sectors; in linear_conf()
294 sector_t start_sector, end_sector, data_offset; in linear_make_request()
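md's linear personality simply concatenates its members: each dev_info records that member's cumulative end sector, which_dev() finds the first member whose end lies past the bio's sector, and linear_make_request() remaps the bio to sector - start_sector + data_offset on that member. A flat sketch of the lookup and remap, with three made-up member sizes:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    int main(void)
    {
        /* Cumulative end sectors of three concatenated members (illustrative). */
        sector_t end_sector[] = { 2048, 6144, 10240 };
        int ndisks = 3;

        sector_t sector = 5000;
        int i = 0;
        while (i < ndisks && sector >= end_sector[i])   /* which_dev(), done linearly here */
            i++;

        sector_t start = i ? end_sector[i - 1] : 0;
        sector_t on_member = sector - start;            /* the kernel also adds data_offset */

        printf("array sector %llu -> member %d, member sector %llu\n",
               (unsigned long long)sector, i, (unsigned long long)on_member);
        return 0;
    }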
faulty.c
88 sector_t faults[MaxFault];
109 static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir) in check_sector()
133 static void add_sector(struct faulty_conf *conf, sector_t start, int mode) in add_sector()
291 static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) in faulty_size()
raid10.c
101 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
487 sector_t first_bad; in raid10_end_write_request()
555 sector_t sector; in __raid10_find_phys()
556 sector_t chunk; in __raid10_find_phys()
557 sector_t stripe; in __raid10_find_phys()
584 sector_t s = sector; in __raid10_find_phys()
630 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt()
632 sector_t offset, chunk, vchunk; in raid10_find_virt()
691 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid10_mergeable_bvec()
786 const sector_t this_sector = r10_bio->sector; in read_balance()
[all …]
raid1.c
69 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
70 sector_t bi_sector);
240 sector_t start_next_window = r1_bio->start_next_window; in call_bio_endio()
241 sector_t bi_sector = bio->bi_iter.bi_sector; in call_bio_endio()
433 sector_t first_bad; in raid1_end_write_request()
513 const sector_t this_sector = r1_bio->sector; in read_balance()
519 sector_t best_dist; in read_balance()
545 sector_t dist; in read_balance()
546 sector_t first_bad; in read_balance()
597 sector_t good_sectors = first_bad - this_sector; in read_balance()
[all …]
/drivers/block/drbd/
drbd_interval.c
9 sector_t interval_end(struct rb_node *node) in interval_end()
22 static inline sector_t
25 sector_t max = node->sector + (node->size >> 9); in compute_subtree_last()
28 sector_t left = interval_end(node->rb.rb_left); in compute_subtree_last()
33 sector_t right = interval_end(node->rb.rb_right); in compute_subtree_last()
41 sector_t, end, compute_subtree_last);
50 sector_t this_end = this->sector + (this->size >> 9); in drbd_insert_interval()
90 drbd_contains_interval(struct rb_root *root, sector_t sector, in drbd_contains_interval()
134 drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size) in drbd_find_overlap()
138 sector_t end = sector + (size >> 9); in drbd_find_overlap()
[all …]
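DRBD tracks in-flight requests as intervals in an augmented rb-tree: each node stores a start sector and a size in bytes, its end is sector + (size >> 9), and compute_subtree_last() caches the maximum end of every subtree so drbd_find_overlap() can prune whole branches. The overlap test itself is ordinary half-open interval logic, sketched below with two made-up requests:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t sector_t;

    struct interval {
        sector_t sector;        /* start sector */
        unsigned int size;      /* size in bytes */
    };

    static sector_t interval_end(const struct interval *i)
    {
        return i->sector + (i->size >> 9);      /* bytes -> sectors */
    }

    static bool overlaps(const struct interval *a, const struct interval *b)
    {
        return a->sector < interval_end(b) && b->sector < interval_end(a);
    }

    int main(void)
    {
        struct interval a = { .sector = 100, .size = 4096 };    /* sectors 100..107 */
        struct interval b = { .sector = 106, .size = 8192 };    /* sectors 106..121 */

        printf("overlap: %s\n", overlaps(&a, &b) ? "yes" : "no");
        return 0;
    }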
drbd_interval.h
9 sector_t sector; /* start sector of the interval */
11 sector_t end; /* highest interval end in subtree */
29 extern bool drbd_contains_interval(struct rb_root *, sector_t,
32 extern struct drbd_interval *drbd_find_overlap(struct rb_root *, sector_t,
34 extern struct drbd_interval *drbd_next_overlap(struct drbd_interval *, sector_t,
drbd_int.h
613 sector_t known_size; /* last known size of that backing device */
844 sector_t p_size; /* partner's disk size */
908 sector_t ov_start_sector;
909 sector_t ov_stop_sector;
911 sector_t ov_position;
913 sector_t ov_last_oos_start;
915 sector_t ov_last_oos_size;
1100 sector_t sector, int blksize, u64 block_id);
1106 sector_t sector, int size, u64 block_id);
1107 extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
[all …]
/drivers/block/
brd.c
55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page()
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) in brd_insert_page()
134 static void brd_free_page(struct brd_device *brd, sector_t sector) in brd_free_page()
147 static void brd_zero_page(struct brd_device *brd, sector_t sector) in brd_zero_page()
196 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) in copy_to_brd_setup()
213 sector_t sector, size_t n) in discard_from_brd()
234 sector_t sector, size_t n) in copy_to_brd()
266 sector_t sector, size_t n) in copy_from_brd()
301 sector_t sector) in brd_do_bvec()
332 sector_t sector; in brd_make_request()
[all …]
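brd, the RAM-backed block device, stores its contents in whole pages looked up by sector: each page covers PAGE_SIZE / 512 sectors, so brd_lookup_page() and friends derive the page index from the sector with a shift and the byte offset within the page from the remainder. A sketch of that split for 4 KiB pages (the sector value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    int main(void)
    {
        const unsigned sector_shift = 9;
        const unsigned page_shift   = 12;                       /* assume 4 KiB pages */
        const unsigned page_sectors = 1u << (page_shift - sector_shift);   /* 8 */

        sector_t sector = 123457;

        uint64_t page_idx = sector >> (page_shift - sector_shift);         /* radix-tree key */
        unsigned page_off = (unsigned)(sector & (page_sectors - 1)) << sector_shift;

        printf("sector %llu -> page %llu, byte offset %u within the page\n",
               (unsigned long long)sector, (unsigned long long)page_idx, page_off);
        return 0;
    }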
/drivers/scsi/
scsi_trace.c
32 sector_t lba = 0, txlen = 0; in scsi_trace_rw6()
50 sector_t lba = 0, txlen = 0; in scsi_trace_rw10()
75 sector_t lba = 0, txlen = 0; in scsi_trace_rw12()
98 sector_t lba = 0, txlen = 0; in scsi_trace_rw16()
129 sector_t lba = 0, txlen = 0; in scsi_trace_rw32()
196 sector_t lba = 0; in scsi_trace_service_action_in()
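scsi_trace.c uses sector_t only to hold LBAs and transfer lengths decoded from CDBs for tracing; READ/WRITE(6/10/12/16/32) keep those fields at fixed, big-endian offsets in the command block. A standalone sketch decoding a READ(10) CDB, whose 32-bit LBA sits in bytes 2-5 and whose 16-bit transfer length sits in bytes 7-8 (the sample CDB is made up):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    static uint32_t get_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static uint16_t get_be16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }

    int main(void)
    {
        /* READ(10) for LBA 0x12345678, 8 blocks (illustrative). */
        uint8_t cdb[10] = { 0x28, 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x08, 0x00 };

        sector_t lba   = get_be32(&cdb[2]);
        sector_t txlen = get_be16(&cdb[7]);

        printf("READ(10): lba=%llu, txlen=%llu\n",
               (unsigned long long)lba, (unsigned long long)txlen);
        return 0;
    }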
/drivers/target/
target_core_file.c
478 sector_t nolb = sbc_get_write_same_sectors(cmd); in fd_execute_write_same()
539 fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, in fd_do_prot_fill()
544 sector_t prot_length, prot; in fd_do_prot_fill()
555 sector_t len = min_t(sector_t, bufsize, prot_length - prot); in fd_do_prot_fill()
569 fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) in fd_do_prot_unmap()
589 fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) in fd_do_unmap()
642 sector_t lba = cmd->t_task_lba; in fd_execute_write_same_unmap()
643 sector_t nolb = sbc_get_write_same_sectors(cmd); in fd_execute_write_same_unmap()
867 static sector_t fd_get_blocks(struct se_device *dev) in fd_get_blocks()
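target_core_file uses sector_t both for LBAs and for byte counts when filling protection information: fd_do_prot_fill() walks nolb blocks' worth of PI in buffer-sized pieces, taking min_t(sector_t, bufsize, remaining) on every pass. A sketch of that chunking loop; the buffer size and the 8 bytes of PI per block are illustrative assumptions:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        sector_t nolb = 1000;                   /* number of logical blocks */
        unsigned prot_bytes_per_block = 8;      /* e.g. one DIF tuple per block */
        sector_t bufsize = 4096;                /* size of the fill buffer */

        sector_t prot_length = nolb * prot_bytes_per_block;
        unsigned passes = 0;

        for (sector_t prot = 0; prot < prot_length; ) {
            sector_t len = min_t(sector_t, bufsize, prot_length - prot);
            /* ...here the real code writes `len` bytes of PI to the backing file... */
            prot += len;
            passes++;
        }

        printf("filled %llu bytes of PI in %u passes\n",
               (unsigned long long)prot_length, passes);
        return 0;
    }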
