Lines matching identifier req in drivers/nvme/target/io-cmd-file.c (NVMe target file-backed namespace I/O):
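
/* nvmet_file_submit_bvec(): wrap req->f.bvec in a bvec iov_iter, point the
 * request's kiocb at the namespace's backing file, and call the file's
 * ->write_iter or ->read_iter depending on the command opcode; NVME_RW_FUA
 * on writes is honoured through ki_flags. */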
 85  static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
 88          struct kiocb *iocb = &req->f.iocb;
 94          if (req->cmd->rw.opcode == nvme_cmd_write) {
 95                  if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
 97                  call_iter = req->ns->file->f_op->write_iter;
100                  call_iter = req->ns->file->f_op->read_iter;
104          iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
107          iocb->ki_filp = req->ns->file;
108          iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
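
/* nvmet_file_io_done(): kiocb completion callback. Frees the bio_vec array
 * (kfree or mempool_free, unless the inline array was used) and completes
 * the request, reporting an internal error when fewer bytes than
 * req->data_len were transferred. */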
120          struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
122          if (req->f.bvec != req->inline_bvec) {
123                  if (likely(req->f.mpool_alloc == false))
124                          kfree(req->f.bvec);
126                          mempool_free(req->f.bvec, req->ns->bvec_pool);
129          nvmet_req_complete(req, ret != req->data_len ?
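
/* nvmet_file_execute_rw(): bounds-check the LBA range, pick a bio_vec array
 * (inline, kmalloc_array, or the per-namespace mempool as a last resort),
 * map the request's scatterlist pages into it, and submit the I/O through
 * nvmet_file_submit_bvec(); completion is either synchronous (error/sync
 * path) or via the ki_complete callback. */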
133  static void nvmet_file_execute_rw(struct nvmet_req *req)
135          ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
143          if (!req->sg_cnt || !nr_bvec) {
144                  nvmet_req_complete(req, 0);
148          pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
149          if (unlikely(pos + req->data_len > req->ns->size)) {
150                  nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
155                  req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
158                  req->f.bvec = req->inline_bvec;
160          req->f.mpool_alloc = false;
161          if (unlikely(!req->f.bvec)) {
163                  req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
164                  req->f.mpool_alloc = true;
169          memset(&req->f.iocb, 0, sizeof(struct kiocb));
170          for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
171                  nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
172                  len += req->f.bvec[bv_cnt].bv_len;
173                  total_len += req->f.bvec[bv_cnt].bv_len;
180                          ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
190          if (WARN_ON_ONCE(total_len != req->data_len))
194                  nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
197          req->f.iocb.ki_complete = nvmet_file_io_done;
198          nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
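
/* Buffered-I/O path: reads and writes are deferred to buffered_io_wq and
 * run through the same nvmet_file_execute_rw() from workqueue context. */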
203          struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
205          nvmet_file_execute_rw(req);
208  static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
210          INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
211          queue_work(buffered_io_wq, &req->f.work);
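
/* Flush: nvmet_file_flush() does a vfs_fsync() of the backing file and
 * returns an error status on failure; the flush command itself is executed
 * from a work item. */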
214  u16 nvmet_file_flush(struct nvmet_req *req)
216          if (vfs_fsync(req->ns->file, 1) < 0)
223          struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
225          nvmet_req_complete(req, nvmet_file_flush(req));
228  static void nvmet_file_execute_flush(struct nvmet_req *req)
230          INIT_WORK(&req->f.work, nvmet_file_flush_work);
231          schedule_work(&req->f.work);
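
/* nvmet_file_execute_discard(): walk the DSM ranges copied from the SGL,
 * convert each (slba, nlb) pair to a byte offset/length, bounds-check it
 * against the namespace size, and punch a hole in the backing file with
 * vfs_fallocate(). */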
234  static void nvmet_file_execute_discard(struct nvmet_req *req)
242          for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
243                  ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
248                  offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
250                  len <<= req->ns->blksize_shift;
251                  if (offset + len > req->ns->size) {
256                  if (vfs_fallocate(req->ns->file, mode, offset, len)) {
262          nvmet_req_complete(req, ret);
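
/* DSM: the work item dispatches on dsm.attributes; only the deallocate
 * attribute is handled (via nvmet_file_execute_discard()), everything else
 * completes with success. */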
267          struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
269          switch (le32_to_cpu(req->cmd->dsm.attributes)) {
271                  nvmet_file_execute_discard(req);
277                  nvmet_req_complete(req, 0);
282  static void nvmet_file_execute_dsm(struct nvmet_req *req)
284          INIT_WORK(&req->f.work, nvmet_file_dsm_work);
285          schedule_work(&req->f.work);
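
/* Write Zeroes: convert slba/length to a byte range, bounds-check it, and
 * zero it in the backing file with vfs_fallocate(); failure is reported as
 * NVME_SC_INTERNAL. */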
290          struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
291          struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
297          offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
299                          req->ns->blksize_shift);
301          if (unlikely(offset + len > req->ns->size)) {
302                  nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
306          ret = vfs_fallocate(req->ns->file, mode, offset, len);
307          nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
310  static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
312          INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
313          schedule_work(&req->f.work);
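
/* nvmet_file_parse_io_cmd(): per-opcode setup. Selects the execute handler
 * (buffered or direct read/write, flush, dsm, write zeroes) and the expected
 * data_len for the command, and rejects unknown opcodes. */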
316  u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
318          struct nvme_command *cmd = req->cmd;
323                  if (req->ns->buffered_io)
324                          req->execute = nvmet_file_execute_rw_buffered_io;
326                          req->execute = nvmet_file_execute_rw;
327                  req->data_len = nvmet_rw_len(req);
330                  req->execute = nvmet_file_execute_flush;
331                  req->data_len = 0;
334                  req->execute = nvmet_file_execute_dsm;
335                  req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
339                  req->execute = nvmet_file_execute_write_zeroes;
340                  req->data_len = 0;
344                          cmd->common.opcode, req->sq->qid);
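
The parse/execute split above is a plain function-pointer dispatch: the opcode is inspected once at parse time, a handler and expected transfer length are stored in the request, and the target core invokes the handler later, once data is available. A minimal user-space sketch of that pattern, using hypothetical types and names rather than the kernel API:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins for struct nvmet_req and the NVMe opcodes. */
enum opcode { CMD_READ, CMD_WRITE, CMD_FLUSH };

struct request {
        enum opcode opcode;
        size_t data_len;
        void (*execute)(struct request *req);   /* chosen at parse time */
};

static void execute_rw(struct request *req)
{
        printf("read/write, %zu bytes expected\n", req->data_len);
}

static void execute_flush(struct request *req)
{
        (void)req;
        printf("flush\n");
}

/* Parse step: pick the handler and the expected transfer length up front. */
static int parse_io_cmd(struct request *req)
{
        switch (req->opcode) {
        case CMD_READ:
        case CMD_WRITE:
                req->execute = execute_rw;
                req->data_len = 4096;
                return 0;
        case CMD_FLUSH:
                req->execute = execute_flush;
                req->data_len = 0;
                return 0;
        default:
                return -1;      /* unknown opcode */
        }
}

int main(void)
{
        struct request req = { .opcode = CMD_WRITE };

        if (parse_io_cmd(&req) == 0)
                req.execute(&req);      /* the core would call this after data transfer */
        return 0;
}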