// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
 * All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"

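/*
 * Expiry handler for a pending authentication transaction: if the host
 * does not make progress before the timeout armed in
 * nvmet_execute_auth_send() fires, reset the sequence back to the
 * negotiate state and invalidate the transaction id.
 */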
static void nvmet_auth_expired_work(struct work_struct *work)
{
	struct nvmet_sq *sq = container_of(to_delayed_work(work),
			struct nvmet_sq, auth_expired_work);

	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	sq->dhchap_tid = -1;
}

void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
	/* Initialize in-band authentication */
	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
	sq->authenticated = false;
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}

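/*
 * Parse the host's AUTH_Negotiate message: record the transaction id,
 * validate the requested security protocol, and select the HMAC hash
 * and DH group to be used for this DH-HMAC-CHAP exchange.
 */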
static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_negotiate_data *data = d;
	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;

	pr_debug("%s: ctrl %d qid %d: data sc_c %d napd %d authid %d halen %d dhlen %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
		 data->auth_protocol[0].dhchap.halen,
		 data->auth_protocol[0].dhchap.dhlen);
	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
	if (data->sc_c)
		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;

	if (data->napd != 1)
		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;

	if (data->auth_protocol[0].dhchap.authid !=
	    NVME_AUTH_DHCHAP_AUTH_ID)
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;

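	/*
	 * Walk the host's list of HMAC hash ids: prefer the hash already
	 * configured for this controller, but remember the first hash the
	 * kernel can actually provide as a fallback.
	 */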
	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];

		if (!fallback_hash_id &&
		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
			fallback_hash_id = host_hmac_id;
		if (ctrl->shash_id != host_hmac_id)
			continue;
		hash_id = ctrl->shash_id;
		break;
	}
	if (hash_id == 0) {
		if (fallback_hash_id == 0) {
			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_hmac_name(fallback_hash_id));
		ctrl->shash_id = fallback_hash_id;
	}

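	/*
	 * Same scheme for the DH group list, which starts at a fixed offset
	 * after the hash ids in idlist[]: take the configured group if the
	 * host offered it, otherwise fall back to the first offered group
	 * with a usable KPP implementation.
	 */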
	dhgid = -1;
	fallback_dhgid = -1;
	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];

		if (tmp_dhgid == ctrl->dh_gid) {
			dhgid = tmp_dhgid;
			break;
		}
		if (fallback_dhgid < 0) {
			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);

			if (crypto_has_kpp(kpp, 0, 0))
				fallback_dhgid = tmp_dhgid;
		}
	}
	if (dhgid < 0) {
		if (fallback_dhgid < 0) {
			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_dhgroup_name(fallback_dhgid));
		ctrl->dh_gid = fallback_dhgid;
	}
	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
	return 0;
}

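/*
 * Process the host's AUTH_Reply message: derive the session key from the
 * host's DH public value (if any), recompute the expected host response
 * and compare it with the value sent by the host, and stash the
 * controller challenge (C2) when the host requested bidirectional
 * authentication.
 */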
static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

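	/* Recompute the host response and compare it with data->rval. */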
	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	if (data->cvalid) {
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
		req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
	} else {
		req->sq->authenticated = true;
		req->sq->dhchap_c2 = NULL;
	}

	return 0;
}

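/* Return the extended reason code from the host's AUTH_Failure2 message. */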
static u16 nvmet_auth_failure2(void *d)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	return data->rescode_exp;
}

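/*
 * Handler for the fabrics Authentication Send command: copy in the
 * host-to-controller payload, validate the message against the current
 * authentication state, and advance (or fail) the DH-HMAC-CHAP state
 * machine for this queue.
 */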
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;

	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = le32_to_cpu(req->cmd->auth_send.tl);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
				 ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				if (nvmet_setup_auth(ctrl) < 0) {
					status = NVME_SC_INTERNAL;
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					goto done_failure1;
				}
			}
			req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		status = nvmet_auth_negotiate(req, d);
		if (status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = status;
			status = 0;
		}
		goto done_kfree;
	}
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

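	/* Dispatch on the DH-CHAP message type sent by the host. */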
	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		status = nvmet_auth_reply(req, d);
		if (status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = status;
			status = 0;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		status = nvmet_auth_failure2(d);
		if (status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			status = 0;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}

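/*
 * Build the AUTH_Challenge message for the host: generate a fresh random
 * challenge (C1) and sequence number and, when a DH group is in use,
 * append the controller's DH public value after the challenge.
 */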
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_challenge_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int ret = 0;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
	int data_size = sizeof(*d) + hash_len;

	if (ctrl->dh_tfm)
		data_size += ctrl->dh_keysize;
	if (al < data_size) {
		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
			 al, data_size);
		return -EINVAL;
	}
	memset(data, 0, data_size);
	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hashid = ctrl->shash_id;
	data->hl = hash_len;
	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
	if (!req->sq->dhchap_c1)
		return -ENOMEM;
	get_random_bytes(req->sq->dhchap_c1, data->hl);
	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
	if (ctrl->dh_tfm) {
		data->dhgid = ctrl->dh_gid;
		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
						  ctrl->dh_keysize);
	}
	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
	return ret;
}

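/*
 * Build the AUTH_Success1 message for the host; if the host requested
 * bidirectional authentication, also compute and include the controller
 * response to the host's challenge (C2).
 */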
static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_success1_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);

	WARN_ON(al < sizeof(*data));
	memset(data, 0, sizeof(*data));
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hl = hash_len;
	if (req->sq->dhchap_c2) {
		if (!ctrl->ctrl_key) {
			pr_warn("ctrl %d qid %d no ctrl key\n",
				ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
		}
		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		data->rvalid = 1;
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
	}
	return 0;
}

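/* Build the AUTH_Failure1 message carrying the current DH-CHAP status. */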
static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	WARN_ON(al < sizeof(*data));
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = req->sq->dhchap_status;
}

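/*
 * Handler for the fabrics Authentication Receive command: build the
 * controller-to-host message that matches the current state of the
 * DH-HMAC-CHAP exchange and copy it out to the host.
 */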
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = le32_to_cpu(req->cmd->auth_receive.al);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}