Searched refs:slba (Results 1 – 9 of 9) sorted by relevance
/drivers/nvme/host/
  trace.c
    107  u64 slba = get_unaligned_le64(cdw10);   in nvme_trace_get_lba_status() local
    113  slba, mndw, rl, atype);   in nvme_trace_get_lba_status()
    139  u64 slba = get_unaligned_le64(cdw10);   in nvme_trace_read_write() local
    147  slba, length, control, dsmgmt, reftag);   in nvme_trace_read_write()
    168  u64 slba = get_unaligned_le64(cdw10);   in nvme_trace_zone_mgmt_send() local
    172  trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);   in nvme_trace_zone_mgmt_send()
    181  u64 slba = get_unaligned_le64(cdw10);   in nvme_trace_zone_mgmt_recv() local
    188  slba, numd, zra, zrasf, pr);   in nvme_trace_zone_mgmt_recv()
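The trace helpers above recover slba the same way for every opcode: the starting LBA occupies CDW10 (low 32 bits) and CDW11 (high 32 bits) of the submission queue entry, so a single unaligned little-endian 64-bit load from the cdw10 pointer yields the whole value. A minimal userspace sketch of that decode (decode_slba is a hypothetical name; memcpy() plus le64toh() stands in for the kernel's get_unaligned_le64()):

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical sketch, not the driver code: read CDW10..CDW11 as one
     * little-endian 64-bit value without assuming alignment. */
    static uint64_t decode_slba(const uint8_t *cdw10)
    {
            uint64_t le;

            memcpy(&le, cdw10, sizeof(le));  /* unaligned-safe 8-byte copy */
            return le64toh(le);              /* NVMe fields are little-endian */
    }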
  ioctl.c
    163  c.rw.slba = cpu_to_le64(io.slba);   in nvme_submit_io()
    173  metadata, meta_len, lower_32_bits(io.slba), NULL, 0);   in nvme_submit_io()
    314  __u64 slba;   member
  zns.c
    197  c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));   in nvme_ns_report_zones()
    234  c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));   in nvme_setup_zone_mgmt_send()
  core.c
    857  u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));   in nvme_setup_discard() local
    862  range[0].slba = cpu_to_le64(slba);   in nvme_setup_discard()
    866  u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);   in nvme_setup_discard() local
    872  range[n].slba = cpu_to_le64(slba);   in nvme_setup_discard()
    907  cmnd->write_zeroes.slba =   in nvme_setup_write_zeroes()
    936  cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));   in nvme_setup_rw()
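Every host-side submission path above (read/write, write-zeroes, discard, zone management) derives slba from a 512-byte block-layer sector with nvme_sect_to_lba() and then stores it little-endian via cpu_to_le64(). A minimal sketch of the conversion, assuming the namespace LBA size is a power of two (sect_to_lba and lba_shift are illustrative names, not the driver's exact interface):

    #include <stdint.h>

    #define SECTOR_SHIFT 9  /* block-layer sectors are 512 bytes */

    /* Hypothetical sketch: dividing the 512-byte sector index by the
     * namespace LBA size (1 << lba_shift) gives the device LBA. */
    static uint64_t sect_to_lba(uint64_t sector, unsigned int lba_shift)
    {
            return sector >> (lba_shift - SECTOR_SHIFT);
    }

    /* e.g. a 4 KiB-block namespace (lba_shift = 12): sector 80 -> LBA 10 */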
/drivers/nvme/target/
  zns.c
    160  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);   in nvmet_bdev_validate_zone_mgmt_recv()
    164  req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);   in nvmet_bdev_validate_zone_mgmt_recv()
    257  unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);   in nvmet_req_nr_zones_from_slba()
    275  sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);   in nvmet_bdev_zone_zmgmt_recv_work()
    482  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);   in nvmet_bdev_zmgmt_send_work()
    502  req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);   in nvmet_bdev_zmgmt_send_work()
    508  req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);   in nvmet_bdev_zmgmt_send_work()
    542  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);   in nvmet_bdev_execute_zone_append()
    559  req->error_loc = offsetof(struct nvme_rw_command, slba);   in nvmet_bdev_execute_zone_append()
    565  req->error_loc = offsetof(struct nvme_rw_command, slba);   in nvmet_bdev_execute_zone_append()
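The req->error_loc assignments above record where in the 64-byte submission queue entry the offending field sits; offsetof() on the command structure gives that byte position (40 for slba, since CDW10/CDW11 begin at byte 40 of the SQE). An illustrative stand-in layout, not the kernel's struct nvme_rw_command:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical layout mirroring the SQE field positions. */
    struct rw_cmd_layout {
            uint8_t  opcode;
            uint8_t  flags;
            uint16_t command_id;
            uint32_t nsid;
            uint64_t cdw2_3;        /* bytes  8..15 */
            uint64_t metadata;      /* bytes 16..23 */
            uint64_t prp1, prp2;    /* bytes 24..39 */
            uint64_t slba;          /* bytes 40..47 (CDW10..CDW11) */
    };

    /* offsetof(struct rw_cmd_layout, slba) == 40 */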
  io-cmd-bdev.c
    132  req->error_loc = offsetof(struct nvme_rw_command, slba);   in blk_to_nvme_status()
    158  req->error_slba = le64_to_cpu(req->cmd->rw.slba);   in blk_to_nvme_status()
    162  le64_to_cpu(req->cmd->write_zeroes.slba);   in blk_to_nvme_status()
    265  sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);   in nvmet_bdev_execute_rw()
    353  nvmet_lba_to_sect(ns, range->slba),   in nvmet_bdev_discard_range()
    357  req->error_slba = le64_to_cpu(range->slba);   in nvmet_bdev_discard_range()
    422  sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);   in nvmet_bdev_execute_write_zeroes()
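On the target side the mapping runs in reverse: nvmet_lba_to_sect() turns the on-the-wire slba back into a 512-byte sector index before the I/O is submitted to the backing block device. A minimal sketch, assuming the slba has already been converted to CPU endianness (lba_to_sect and blksize_shift are illustrative names):

    #include <stdint.h>

    #define SECTOR_SHIFT 9  /* block-layer sectors are 512 bytes */

    /* Hypothetical sketch: widen a device LBA to a 512-byte sector index
     * using the namespace block-size shift. */
    static uint64_t lba_to_sect(uint64_t slba_cpu, unsigned int blksize_shift)
    {
            return slba_cpu << (blksize_shift - SECTOR_SHIFT);
    }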
  trace.c
    40  u64 slba = get_unaligned_le64(cdw10);   in nvmet_trace_get_lba_status() local
    46  slba, mndw, rl, atype);   in nvmet_trace_get_lba_status()
    69  u64 slba = get_unaligned_le64(cdw10);   in nvmet_trace_read_write() local
    77  slba, length, control, dsmgmt, reftag);   in nvmet_trace_read_write()
  io-cmd-file.c
    141  pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;   in nvmet_file_execute_io()
    294  offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;   in nvmet_file_execute_discard()
    298  req->error_slba = le64_to_cpu(range.slba);   in nvmet_file_execute_discard()
    305  req->error_slba = le64_to_cpu(range.slba);   in nvmet_file_execute_discard()
    348  offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;   in nvmet_file_write_zeroes_work()
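The file backend above has no block device underneath, so it maps slba straight to a byte offset into the backing file by shifting by the namespace block-size shift. A hypothetical one-liner showing the arithmetic:

    #include <stdint.h>

    /* Hypothetical sketch: byte offset = slba * block size. */
    static uint64_t slba_to_file_offset(uint64_t slba_cpu, unsigned int blksize_shift)
    {
            return slba_cpu << blksize_shift;
    }

    /* e.g. 4 KiB blocks (blksize_shift = 12): slba 3 -> offset 12288 */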
  core.c
    58  req->error_loc = offsetof(struct nvme_rw_command, slba);   in errno_to_nvme_status()