// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
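
/* Return the number of free descriptors in the ring; one descriptor slot
 * is always left unused.
 */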
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
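
/* Check that the CSQ head pointer reported by hardware lies between
 * next_to_clean and next_to_use (inclusive), allowing for ring wrap-around.
 */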
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}
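
/* Allocate the DMA-coherent memory that backs a command queue's
 * descriptor ring.
 */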
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size  = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size  = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}
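
/* Program a ring's base address, depth and head/tail pointers into the
 * CSQ or CRQ hardware registers.
 */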
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}
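
/* Reclaim CSQ descriptors that the IMP firmware has already processed,
 * based on the head pointer it wrote back. Returns the number of cleaned
 * descriptors, or -EIO if the reported head is out of range.
 */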
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* These commands have several descriptors, and use the first one
	 * to save the opcode and return value.
	 */
	u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
			     HCLGE_OPC_STATS_32_BIT,
			     HCLGE_OPC_STATS_MAC,
			     HCLGE_OPC_STATS_MAC_ALL,
			     HCLGE_OPC_QUERY_32_BIT_REG,
			     HCLGE_OPC_QUERY_64_BIT_REG,
			     HCLGE_QUERY_CLEAR_MPF_RAS_INT,
			     HCLGE_QUERY_CLEAR_PF_RAS_INT,
			     HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
			     HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

static int hclge_cmd_convert_err_code(u16 desc_ret)
{
	switch (desc_ret) {
	case HCLGE_CMD_EXEC_SUCCESS:
		return 0;
	case HCLGE_CMD_NO_AUTH:
		return -EPERM;
	case HCLGE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HCLGE_CMD_QUEUE_FULL:
		return -EXFULL;
	case HCLGE_CMD_NEXT_ERR:
		return -ENOSR;
	case HCLGE_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HCLGE_CMD_PARA_ERR:
		return -EINVAL;
	case HCLGE_CMD_RESULT_ERR:
		return -ERANGE;
	case HCLGE_CMD_TIMEOUT:
		return -ETIME;
	case HCLGE_CMD_HILINK_ERR:
		return -ENOLINK;
	case HCLGE_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HCLGE_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}
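
/* Copy the descriptors written back by hardware into the caller's array
 * and convert the firmware return code into a standard errno value.
 */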
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_cmd_convert_err_code(desc_ret);
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the CSQ, waits for completion when the command is
 * synchronous, and cleans the queue afterwards.
 */
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time, which will
	 * be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back.
	 * If multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete)
		retval = -EBADE;
	else
		retval = hclge_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		retval = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}
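
/* Example (illustrative sketch, not part of the driver flow): a typical
 * synchronous query through hclge_cmd_send() sets up one descriptor and
 * then reads the response from desc.data[], e.g.:
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 *	if (ret)
 *		return ret;
 *
 * On success, desc.data[] holds the data written back by the firmware,
 * as done in hclge_cmd_query_version_and_capability() below.
 */
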
static void hclge_set_default_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}

static void hclge_parse_capability(struct hclge_dev *hdev,
				   struct hclge_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps;

	caps = __le32_to_cpu(cmd->caps[0]);

	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
		set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
}

static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
					 HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_set_default_capability(hdev);

	hclge_parse_capability(hdev, resp);

	return ret;
}
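
/* Allocate the CSQ and CRQ descriptor rings and initialize their locks,
 * ring depth and command send timeout.
 */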
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the queue entries for the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;

err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

static int hclge_firmware_compat_config(struct hclge_dev *hdev)
{
	struct hclge_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);

	req = (struct hclge_firmware_compat_cmd *)desc.data;

	hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
	hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
	req->compat = cpu_to_le32(compat);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
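
/* Bring up the command queue: reset the ring pointers, program the CSQ/CRQ
 * registers, then query the firmware version and device capabilities. On
 * failure the command queue is left disabled (HCLGE_STATE_CMD_DISABLE).
 */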
int hclge_cmd_init(struct hclge_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		dev_err(&hdev->pdev->dev,
			"failed to init cmd since reset %#lx pending\n",
			hdev->reset_pending);
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* Get the firmware version and device capabilities */
	ret = hclge_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_firmware_compat_config(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
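
/* Shut down the command queue: block new commands, wait for the firmware
 * to finish any outstanding ones, then clear the hardware registers and
 * free the descriptor rings.
 */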
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	/* Wait to ensure that the firmware completes any commands that may
	 * be left over.
	 */
	msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}