
Searched refs:offset (Results 1 – 14 of 14) sorted by relevance

/block/partitions/
ibm.c
144 loff_t offset, size; in find_vol1_partitions() local
175 offset = cchh2blk(&f1.DS1EXT1.llimit, geo); in find_vol1_partitions()
177 offset + geo->sectors; in find_vol1_partitions()
178 offset *= secperblk; in find_vol1_partitions()
182 put_partition(state, counter + 1, offset, size); in find_vol1_partitions()
204 loff_t offset, geo_size, size; in find_lnx1_partitions() local
236 offset = labelsect + secperblk; in find_lnx1_partitions()
237 put_partition(state, 1, offset, size - offset); in find_lnx1_partitions()
249 loff_t offset, size; in find_cms1_partitions() local
262 offset = label->cms.disk_offset * secperblk; in find_cms1_partitions()
[all …]
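
The ibm.c hits above center on turning a cylinder/head (CCHH) address from the volume label into a 512-byte sector offset for put_partition(). The standalone sketch below reproduces only that arithmetic; the struct names, geometry values and secperblk are invented for illustration, and cchh_to_block() merely stands in for the kernel's cchh2blk() helper.

/*
 * Minimal userspace sketch of the cylinder/head -> sector arithmetic the
 * ibm.c matches revolve around: convert a CCHH address to a linear block,
 * then scale by sectors per block before reporting the partition.  All
 * names and values here are illustrative, not the kernel's.
 */
#include <stdio.h>

struct geometry {            /* stand-in for the disk geometry struct */
	unsigned int heads;
	unsigned int sectors;    /* sectors per track */
};

struct cchh {                /* cylinder/head address from the volume label */
	unsigned int cc;         /* cylinder */
	unsigned int hh;         /* head (track) */
};

/* Linear block number of the first block on the given track. */
static unsigned long cchh_to_block(const struct cchh *addr,
				   const struct geometry *geo)
{
	return (unsigned long)addr->cc * geo->heads * geo->sectors +
	       (unsigned long)addr->hh * geo->sectors;
}

int main(void)
{
	struct geometry geo = { .heads = 15, .sectors = 12 };  /* example values */
	struct cchh lower = { .cc = 2, .hh = 1 };
	struct cchh upper = { .cc = 10, .hh = 14 };
	unsigned int secperblk = 8;  /* 8 x 512-byte sectors per 4096-byte block */

	unsigned long offset = cchh_to_block(&lower, &geo);
	unsigned long size   = cchh_to_block(&upper, &geo) + geo.sectors - offset;

	/* scale from device blocks to 512-byte sectors before reporting */
	offset *= secperblk;
	size   *= secperblk;

	printf("partition at sector %lu, %lu sectors long\n", offset, size);
	return 0;
}
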
msdos.c
250 sector_t offset, sector_t size, int origin) in parse_solaris_x86() argument
258 v = read_part_sector(state, offset + 1, &sect); in parse_solaris_x86()
293 le32_to_cpu(s->s_start)+offset, in parse_solaris_x86()
355 sector_t offset, sector_t size, int origin, char *flavour, in parse_bsd() argument
363 l = read_part_sector(state, offset + 1, &sect); in parse_bsd()
388 bsd_start += offset; in parse_bsd()
389 if (offset == bsd_start && size == bsd_size) in parse_bsd()
392 if (offset > bsd_start || offset+size < bsd_start+bsd_size) { in parse_bsd()
409 sector_t offset, sector_t size, int origin) in parse_freebsd() argument
412 parse_bsd(state, offset, size, origin, "bsd", BSD_MAXPARTITIONS); in parse_freebsd()
[all …]
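
The parse_bsd() lines show a containment test: a nested BSD partition is accepted only if it lies entirely inside the enclosing DOS slice and is not simply the slice itself. Below is a minimal userspace sketch of that test, with sector_t approximated by unsigned long long and the example values made up.

/*
 * Sketch of the containment check visible in parse_bsd(): a nested
 * partition is only kept if it fits inside the enclosing slice.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Does [inner_start, inner_start + inner_size) fit inside
 * [outer_start, outer_start + outer_size) ? */
static bool nested_part_ok(sector_t outer_start, sector_t outer_size,
			   sector_t inner_start, sector_t inner_size)
{
	if (outer_start == inner_start && outer_size == inner_size)
		return false;   /* same extent as the slice itself: skip it */
	if (outer_start > inner_start ||
	    outer_start + outer_size < inner_start + inner_size)
		return false;   /* sticks out of the enclosing slice */
	return true;
}

int main(void)
{
	printf("%d\n", nested_part_ok(2048, 1000000, 4096, 500000));  /* 1 */
	printf("%d\n", nested_part_ok(2048, 1000000, 1024, 500000));  /* 0 */
	return 0;
}
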
ldm.c
621 static int ldm_relative(const u8 *buffer, int buflen, int base, int offset) in ldm_relative() argument
624 base += offset; in ldm_relative()
625 if (!buffer || offset < 0 || base > buflen) { in ldm_relative()
628 if (offset < 0) in ldm_relative()
629 ldm_error("offset (%d) < 0", offset); in ldm_relative()
639 return buffer[base] + offset + 1; in ldm_relative()
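
ldm_relative() validates a relative offset against the buffer bounds before reading from it. The sketch below keeps only that validation, slightly tightened so the direct array read stays in range (the kernel helper additionally derives a new position from the byte it reads); the function and variable names are illustrative, not the kernel's.

/*
 * Sketch of the bounds check shown in ldm_relative(): validate a relative
 * offset against the buffer length before dereferencing, and report which
 * condition failed.  Self-contained userspace approximation.
 */
#include <stdio.h>

typedef unsigned char u8;

/* Return the byte at base + offset, or -1 if the access would be invalid. */
static int relative_byte(const u8 *buffer, int buflen, int base, int offset)
{
	base += offset;
	if (!buffer || offset < 0 || base < 0 || base >= buflen) {
		if (!buffer)
			fprintf(stderr, "buffer is NULL\n");
		else if (offset < 0)
			fprintf(stderr, "offset (%d) < 0\n", offset);
		else
			fprintf(stderr, "base (%d) outside buffer (%d)\n",
				base, buflen);
		return -1;
	}
	return buffer[base];
}

int main(void)
{
	u8 buf[] = { 0x10, 0x20, 0x30, 0x40 };

	printf("%d\n", relative_byte(buf, sizeof(buf), 1, 2));  /* 0x40 -> 64 */
	printf("%d\n", relative_byte(buf, sizeof(buf), 1, 5));  /* -1, out of range */
	return 0;
}
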
/block/
bio.c
574 unsigned offset; in bio_truncate() local
577 offset = new_size - done; in bio_truncate()
579 offset = 0; in bio_truncate()
580 zero_user(bv.bv_page, bv.bv_offset + offset, in bio_truncate()
581 bv.bv_len - offset); in bio_truncate()
766 unsigned offset, bool *same_page) in bio_try_merge_hw_seg() argument
771 phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; in bio_try_merge_hw_seg()
777 return __bio_try_merge_page(bio, page, len, offset, same_page); in bio_try_merge_hw_seg()
794 struct page *page, unsigned int len, unsigned int offset, in bio_add_hw_page() argument
806 if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page)) in bio_add_hw_page()
[all …]
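
The bio_try_merge_hw_seg() lines compute the physical address of the last byte of the incoming range (offset + len - 1) and compare it, ORed with the segment-boundary mask, against the start of the existing segment: both must fall into the same boundary-sized window. A small sketch of that test, with an invented 64 KiB boundary and example addresses.

/*
 * Sketch of the hardware-segment merge test: two ranges may only be folded
 * into one DMA segment if the combined range does not straddle the
 * controller's segment boundary.  Boundary mask and addresses are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* true if the segment start and the candidate's last byte share one
 * boundary-sized window */
static bool same_hw_segment(phys_addr_t seg_start, phys_addr_t last_byte,
			    phys_addr_t boundary_mask)
{
	return (seg_start | boundary_mask) == (last_byte | boundary_mask);
}

int main(void)
{
	phys_addr_t mask = 0xffff;              /* 64 KiB segment boundary */
	phys_addr_t seg_start = 0x10000;        /* start of the existing segment */
	phys_addr_t new_off = 0x1f000, new_len = 0x2000;

	/* last byte of the candidate range, as in offset + len - 1 */
	phys_addr_t last = new_off + new_len - 1;

	/* crosses the 0x20000 boundary, so this prints "no" */
	printf("mergeable: %s\n",
	       same_hw_segment(seg_start, last, mask) ? "yes" : "no");
	return 0;
}
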
blk-map.c
139 unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0; in bio_copy_user_iov() local
153 nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); in bio_copy_user_iov()
165 i = map_data->offset / PAGE_SIZE; in bio_copy_user_iov()
170 bytes -= offset; in bio_copy_user_iov()
193 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
200 offset = 0; in bio_copy_user_iov()
204 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
390 int offset, i; in bio_map_kern() local
402 offset = offset_in_page(kaddr); in bio_map_kern()
404 unsigned int bytes = PAGE_SIZE - offset; in bio_map_kern()
[all …]
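
The bio_copy_user_iov() and bio_map_kern() hits share one pattern: a buffer of len bytes starting offset bytes into a page needs DIV_ROUND_UP(offset + len, PAGE_SIZE) pages, and only PAGE_SIZE - offset bytes fit in the first one. A self-contained sketch of that arithmetic; PAGE_SIZE, DIV_ROUND_UP and offset_in_page are local definitions here, not the kernel macros, and the address is an example value.

/* Page-count arithmetic for a buffer that starts mid-page. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long offset_in_page(unsigned long addr)
{
	return addr & (PAGE_SIZE - 1);
}

int main(void)
{
	unsigned long addr = 0x1000f00;   /* arbitrary example address */
	unsigned long len = 10000;

	unsigned long offset = offset_in_page(addr);
	unsigned long nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned long first_chunk = PAGE_SIZE - offset;

	printf("offset in page: %lu\n", offset);       /* 3840 */
	printf("pages needed:   %lu\n", nr_pages);     /* 4 */
	printf("bytes in first: %lu\n", first_chunk);  /* 256 */
	return 0;
}
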
bio-integrity.c
130 unsigned int len, unsigned int offset) in bio_integrity_add_page() argument
144 &bip->bip_vec[bip->bip_vcnt - 1], offset)) in bio_integrity_add_page()
149 iv->bv_offset = offset; in bio_integrity_add_page()
216 unsigned int bytes, offset, i; in bio_integrity_prep() local
274 offset = offset_in_page(buf); in bio_integrity_prep()
277 bytes = PAGE_SIZE - offset; in bio_integrity_prep()
286 bytes, offset); in bio_integrity_prep()
299 offset = 0; in bio_integrity_prep()
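
bio_integrity_prep() walks the integrity buffer page by page: the first chunk is capped at PAGE_SIZE - offset, and every later chunk restarts at offset 0. A hedged userspace sketch of that loop, with the add-page call replaced by a printf and PAGE_SIZE defined locally.

/* Walk a buffer in page-sized chunks, honouring the initial offset. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void add_chunks(unsigned long start_offset, unsigned long total)
{
	unsigned long offset = start_offset;

	while (total) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > total)
			bytes = total;

		printf("add page chunk: offset=%lu len=%lu\n", offset, bytes);

		total -= bytes;
		offset = 0;   /* later pages are used from their start */
	}
}

int main(void)
{
	add_chunks(3000, 10000);   /* 1096 + 4096 + 4096 + 712 bytes */
	return 0;
}
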
blk-mq-pci.c
27 int offset) in blk_mq_pci_map_queues() argument
33 mask = pci_irq_get_affinity(pdev, queue + offset); in blk_mq_pci_map_queues()
blk-merge.c
166 unsigned long offset) in get_max_segment_size() argument
170 offset = mask & (page_to_phys(start_page) + offset); in get_max_segment_size()
176 return min_not_zero(mask - offset + 1, in get_max_segment_size()
428 unsigned offset = bvec->bv_offset + total; in blk_bvec_map_sg() local
430 offset), nbytes); in blk_bvec_map_sg()
441 page += (offset >> PAGE_SHIFT); in blk_bvec_map_sg()
442 offset &= ~PAGE_MASK; in blk_bvec_map_sg()
445 sg_set_page(*sg, page, len, offset); in blk_bvec_map_sg()
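
blk_bvec_map_sg() folds an oversized bv_offset into the page pointer before building the scatterlist entry: page += offset >> PAGE_SHIFT, then offset &= ~PAGE_MASK. The sketch below shows the same normalization with a plain page index standing in for struct page *.

/* Normalize a (page, offset) pair so the offset stays below PAGE_SIZE. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long page_index = 7;   /* stands in for a struct page pointer */
	unsigned long offset = 9500;    /* offset larger than one page */

	page_index += offset >> PAGE_SHIFT;   /* advance 2 pages */
	offset &= ~PAGE_MASK;                 /* keep the in-page remainder */

	printf("page %lu, offset %lu\n", page_index, offset);  /* page 9, offset 1308 */
	return 0;
}
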
blk.h
72 struct bio_vec *bprv, unsigned int offset) in __bvec_gap_to_prev() argument
74 return (offset & queue_virt_boundary(q)) || in __bvec_gap_to_prev()
83 struct bio_vec *bprv, unsigned int offset) in bvec_gap_to_prev() argument
87 return __bvec_gap_to_prev(q, bprv, offset); in bvec_gap_to_prev()
448 struct page *page, unsigned int len, unsigned int offset,
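
__bvec_gap_to_prev() reports an SG gap when the new vector does not start on the queue's virt boundary or the previous vector does not end on it. A standalone approximation of that check, with an example 4 KiB boundary mask.

/* Virt-boundary gap test between two consecutive vectors. */
#include <stdbool.h>
#include <stdio.h>

static bool gap_to_prev(unsigned int prev_offset, unsigned int prev_len,
			unsigned int next_offset, unsigned long virt_boundary)
{
	if (!virt_boundary)
		return false;   /* device has no boundary restriction */
	return (next_offset & virt_boundary) ||
	       ((prev_offset + prev_len) & virt_boundary);
}

int main(void)
{
	unsigned long mask = 4096 - 1;

	/* previous vector ends on the boundary, next starts on it: no gap */
	printf("%d\n", gap_to_prev(0, 4096, 0, mask));    /* 0 */
	/* next vector starts mid-page: gap */
	printf("%d\n", gap_to_prev(0, 4096, 512, mask));  /* 1 */
	return 0;
}
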
blk-settings.c
367 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) in blk_queue_alignment_offset() argument
370 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
649 sector_t offset) in disk_stack_limits() argument
654 get_start_sect(bdev) + (offset >> 9)) < 0) { in disk_stack_limits()
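
The blk-settings.c hits show two conversions: the alignment offset is the start offset masked by physical_block_size - 1 (blk_queue_alignment_offset), and a byte offset becomes a 512-byte sector count via offset >> 9 (disk_stack_limits). A tiny example with made-up values.

/* Alignment-offset masking and byte-to-sector shift. */
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 3584;        /* partition start, in bytes */
	unsigned int physical_block_size = 4096;

	unsigned int alignment_offset = offset & (physical_block_size - 1);
	unsigned long long start_sector = offset >> 9;

	printf("alignment offset: %u bytes\n", alignment_offset);  /* 3584 */
	printf("start sector:     %llu\n", start_sector);          /* 7 */
	return 0;
}
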
sed-opal.c
1227 u64 offset, u64 size, const u8 *uid) in generic_table_write_data() argument
1243 if (size > len || offset > len - size) { in generic_table_write_data()
1245 offset + size, len); in generic_table_write_data()
1254 add_token_u64(&err, dev, offset + off); in generic_table_write_data()
1658 return generic_table_write_data(dev, shadow->data, shadow->offset, in write_shadow_mbr()
1972 return generic_table_write_data(dev, write_tbl->data, write_tbl->offset, in write_table_data()
2010 u64 offset = read_tbl->offset, read_size = read_tbl->size - 1; in read_table_data() local
2022 if (read_size > table_len || offset > table_len - read_size) { in read_table_data()
2024 offset + read_size, table_len); in read_table_data()
2034 add_token_u64(&err, dev, offset + off); /* start row value */ in read_table_data()
[all …]
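
generic_table_write_data() and read_table_data() guard the table access with size > len || offset > len - size, which avoids the wrap-around that a naive offset + size > len test could hit for large u64 values. A standalone version of that check, with example lengths.

/* Overflow-safe check that [offset, offset + size) fits in 'len' bytes. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_fits(uint64_t offset, uint64_t size, uint64_t len)
{
	return size <= len && offset <= len - size;
}

int main(void)
{
	printf("%d\n", range_fits(100, 50, 200));              /* 1 */
	printf("%d\n", range_fits(180, 50, 200));              /* 0 */
	/* offset + size would wrap to a small number here: */
	printf("%d\n", range_fits(UINT64_MAX - 10, 50, 200));  /* 0 */
	return 0;
}
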
blk-crypto-fallback.c
343 src.offset += data_unit_size; in blk_crypto_fallback_encrypt_bio()
344 dst.offset += data_unit_size; in blk_crypto_fallback_encrypt_bio()
429 sg.offset += data_unit_size; in blk_crypto_fallback_decrypt_bio()
elevator.c
229 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) in elv_rqhash_find() argument
235 hash_for_each_possible_safe(e->hash, rq, next, hash, offset) { in elv_rqhash_find()
243 if (rq_hash_key(rq) == offset) in elv_rqhash_find()
genhd.c
408 void blkdev_show(struct seq_file *seqf, off_t offset) in blkdev_show() argument
413 for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next) in blkdev_show()
414 if (dp->major == offset) in blkdev_show()