// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

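/* Number of free descriptors left in the ring. One slot is always kept
 * unused so that a full ring can be distinguished from an empty one.
 */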
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

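/* Check that the head pointer reported by hardware lies between the
 * software clean and use indices, taking ring wrap-around into account.
 */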
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

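/* Allocate the DMA-coherent descriptor area for a command queue ring. */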
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

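/* Pick the CSQ or CRQ ring for the requested type and allocate its
 * descriptor memory.
 */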
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

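/* Re-arm an already filled descriptor for another send: reset the common
 * flags, then set or clear the WR flag according to whether the command
 * is a read.
 */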
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

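/* Zero a descriptor and fill in the opcode and base flags for a new command. */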
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

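/* Program the ring's DMA base address, depth and head/tail pointers into
 * the CSQ or CRQ hardware registers.
 */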
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

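/* Reclaim CSQ descriptors that firmware has consumed, based on the head
 * pointer it reports. Returns the number of descriptors cleaned, or -EIO
 * if the reported head is outside the valid window.
 */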
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

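/* A command is done when the hardware head pointer has caught up with the
 * software next_to_use index.
 */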
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
			     HCLGE_OPC_STATS_32_BIT,
			     HCLGE_OPC_STATS_MAC,
			     HCLGE_OPC_STATS_MAC_ALL,
			     HCLGE_OPC_QUERY_32_BIT_REG,
			     HCLGE_OPC_QUERY_64_BIT_REG,
			     HCLGE_QUERY_CLEAR_MPF_RAS_INT,
			     HCLGE_QUERY_CLEAR_PF_RAS_INT,
			     HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
			     HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

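/* Map the firmware return code carried in a descriptor to a Linux errno. */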
static int hclge_cmd_convert_err_code(u16 desc_ret)
{
	switch (desc_ret) {
	case HCLGE_CMD_EXEC_SUCCESS:
		return 0;
	case HCLGE_CMD_NO_AUTH:
		return -EPERM;
	case HCLGE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HCLGE_CMD_QUEUE_FULL:
		return -EXFULL;
	case HCLGE_CMD_NEXT_ERR:
		return -ENOSR;
	case HCLGE_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HCLGE_CMD_PARA_ERR:
		return -EINVAL;
	case HCLGE_CMD_RESULT_ERR:
		return -ERANGE;
	case HCLGE_CMD_TIMEOUT:
		return -ETIME;
	case HCLGE_CMD_HILINK_ERR:
		return -ENOLINK;
	case HCLGE_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HCLGE_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}

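/* Copy the descriptors written back by firmware out of the CSQ ring,
 * starting at index ntc, and convert the returned status to an errno.
 * Special (multi-descriptor) opcodes keep their return value in the
 * first descriptor; all others keep it in the last one.
 */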
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_cmd_convert_err_code(desc_ret);
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It posts the
 * descriptors to the queue, waits for completion and cleans the queue.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval = 0;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of the descriptors in the ring for this send,
	 * which hardware will use for write-back.
	 */
	ntc = hw->cmq.csq.next_to_use;
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back.
	 * If multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete)
		retval = -EBADE;
	else
		retval = hclge_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		retval = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}

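/* Query the running firmware version over the command queue. */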
static enum hclge_cmd_status hclge_cmd_query_firmware_version(
		struct hclge_hw *hw, u32 *version)
{
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;

	ret = hclge_cmd_send(hw, &desc, 1);
	if (!ret)
		*version = le32_to_cpu(resp->firmware);

	return ret;
}

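/* One-time setup of the CSQ and CRQ rings: locks, depths, timeout and
 * descriptor memory. The rings are not programmed into hardware here;
 * that is done later in hclge_cmd_init().
 */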
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for the command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the queue entries used by the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

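/* Tell the firmware which optional features the driver supports, i.e.
 * link event reporting and NCSI error reporting.
 */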
static int hclge_firmware_compat_config(struct hclge_dev *hdev)
{
	struct hclge_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);

	req = (struct hclge_firmware_compat_cmd *)desc.data;

	hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
	hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
	req->compat = cpu_to_le32(compat);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

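/* Bring the command queue into service: reset the ring indices, program
 * the hardware registers, re-enable command submission and query the
 * firmware version and compatibility features.
 */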
int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"firmware version query failed %d\n", ret);
		goto err_cmd_init;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_firmware_compat_config(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

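/* Clear the CSQ and CRQ base address, depth and pointer registers so the
 * hardware no longer references the rings.
 */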
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}

static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock(&ring->lock);
	hclge_free_cmd_desc(ring);
	spin_unlock(&ring->lock);
}

static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}

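/* Tear down the command queue: disable further commands, clear the
 * hardware registers and free the descriptor memory.
 */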
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_destroy_cmd_queue(&hdev->hw);
}