// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

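	/* NUMD is split across NUMDU (upper 16 bits) and NUMDL (lower 16). */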
	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

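	/* Walk the ring backwards from the most recent slot, wrapping at 0. */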
	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

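	/* The SMART log counts data units in thousands of 512-byte units. */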
	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

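	/* Read err_counter under the same lock that guards the error ring. */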
	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
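	/*
	 * nr_changed_ns == U32_MAX marks an overflowed list: the log then
	 * holds the single entry 0xffffffff, telling the host to rescan
	 * all namespaces.
	 */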
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

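	/* RGO set means the host wants group descriptors only, no NSID lists. */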
	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
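	/*
	 * If the copy above failed part way, keep counting the remaining
	 * enabled groups so the header still reports the true total.
	 */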
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
			req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
				 NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

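	/*
	 * Queue entry sizes, encoded as powers of two: both the required
	 * and maximum sizes are 64-byte SQEs (2^6) and 16-byte CQEs (2^4).
	 */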
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

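	/*
	 * SGLS: bit 0 = SGLs supported for data transfers, bit 2 = keyed
	 * SGL data block descriptor (used by RDMA), bit 20 = SGL offset
	 * addressing, needed for in-capsule data.
	 */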
	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

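	/* ANACAP bits 0-4: we can report all five ANA states. */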
	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* arbitrary value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared. Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

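	/* ds is the log2 of the LBA data size in bytes. */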
	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

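	/*
	 * xarray iteration walks indices in ascending order, so the list
	 * holds active NSIDs above CDW1.NSID, lowest first, up to the
	 * 4KB buffer limit of 1024 entries.
	 */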
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

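	/* Zero the rest of the buffer; a zero NIDT field terminates the list. */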
	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			nvmet_execute_identify_desclist(req);
			return true;
		}
		return false;
	default:
		return false;
	}
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
	/* Not supported: return zeroes */
	nvmet_req_complete(req,
		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CS_NS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ns(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req))
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed and return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
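		/* NSQR/NCQR and the returned NSQA/NCQA are all 0-based values. */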
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them. We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
					   sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

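	/*
	 * AER commands are held here until an event fires; allow at most
	 * NVMET_ASYNC_EVENTS outstanding commands (matching AERL + 1).
	 */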
	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}