// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->sq.head = PF_FW_ATQH;
	cq->sq.tail = PF_FW_ATQT;
	cq->sq.len = PF_FW_ATQLEN;
	cq->sq.bah = PF_FW_ATQBAH;
	cq->sq.bal = PF_FW_ATQBAL;
	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

	cq->rq.head = PF_FW_ARQH;
	cq->rq.tail = PF_FW_ARQT;
	cq->rq.len = PF_FW_ARQLEN;
	cq->rq.bah = PF_FW_ARQBAH;
	cq->rq.bal = PF_FW_ARQBAL;
	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}

/**
 * ice_check_sq_alive - check if the Send Queue (ATQ) is alive
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
	cq->sq.desc_buf.va = NULL;
	cq->sq.desc_buf.pa = 0;
	cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
	cq->rq.desc_buf.va = NULL;
	cq->rq.desc_buf.pa = 0;
	cq->rq.desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* free the pre-posted receive buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}

	/* free the DMA header */
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < cq->num_sq_entries; i++)
		if (cq->sq.r.sq_bi[i].pa) {
			dmam_free_coherent(ice_hw_to_dev(hw),
					   cq->sq.r.sq_bi[i].size,
					   cq->sq.r.sq_bi[i].va,
					   cq->sq.r.sq_bi[i].pa);
			cq->sq.r.sq_bi[i].va = NULL;
			cq->sq.r.sq_bi[i].pa = 0;
			cq->sq.r.sq_bi[i].size = 0;
		}

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);

	/* free the DMA header */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);

	/* set starting point */
	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->sq.bal);
	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);

	/* set starting point */
	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->rq.bal);
	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_sq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_rq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ice_free_sq_bufs(hw, cq);
	ice_free_ctrlq_sq_ring(hw, cq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	if (cq->rq.head) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
	if (cq->sq.head) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size)
		return ICE_ERR_CFG;

	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	return ice_init_check_adminq(hw);
}
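
/*
 * Illustrative sketch, not part of the upstream file: a minimal bring-up and
 * tear-down sequence for the control queues. The entry counts and buffer
 * sizes below are hypothetical placeholders; real drivers choose their own.
 * Per the kernel-doc above, these fields *MUST* be non-zero before
 * ice_init_all_ctrlq() is called, or initialization fails with ICE_ERR_CFG.
 * ice_example_ctrlq_bringup() is a made-up helper name, and this sketch
 * assumes ice_shutdown_all_ctrlq() is declared via ice_common.h.
 */
static enum ice_status __maybe_unused
ice_example_ctrlq_bringup(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	/* placeholder sizing; ice_init_ctrlq() rejects zero values */
	cq->num_sq_entries = 32;
	cq->num_rq_entries = 32;
	cq->sq_buf_size = 4096;
	cq->rq_buf_size = 4096;

	status = ice_init_all_ctrlq(hw);
	if (status)
		return status;

	/* ... issue AdminQ commands via ice_sq_send_cmd() here ... */

	ice_shutdown_all_ctrlq(hw);
	return 0;
}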

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	default:
		return;
	}

	if (cq->sq.head) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	if (cq->rq.head) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
}

/**
 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to the Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It posts the command
 * to the queue, cleans completed descriptors, and polls for completion.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		memcpy(details, cd, sizeof(*details));
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
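
/*
 * Illustrative sketch, not part of the upstream file: sending a direct
 * (buffer-less) command on the AdminQ with the two helpers above.
 * ice_example_send_direct_cmd() and its opcode parameter are hypothetical;
 * real callers pass a firmware opcode and usually go through higher-level
 * wrappers such as ice_aq_get_fw_ver().
 */
static enum ice_status __maybe_unused
ice_example_send_direct_cmd(struct ice_hw *hw, u16 opcode)
{
	struct ice_aq_desc desc;

	/* zero the descriptor, then set the opcode and the SI flag */
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);

	/* direct command: NULL buffer, zero length, no command details */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}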

/**
 * ice_clean_rq_elem - clean one element from the Control ARQ
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
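
/*
 * Illustrative sketch, not part of the upstream file: draining the ARQ with
 * ice_clean_rq_elem(). ice_example_drain_arq() is a hypothetical helper; the
 * kzalloc()/kfree() pairing is this sketch's choice (and assumes
 * <linux/slab.h> is available through ice_common.h), not something the API
 * mandates. The caller owns e->msg_buf and e->buf_len.
 */
static void __maybe_unused ice_example_drain_arq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event = { 0 };
	u16 pending = 0;

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* clean one element at a time until no work (or an error) remains */
	do {
		if (ice_clean_rq_elem(hw, cq, &event, &pending))
			break;	/* ICE_ERR_AQ_NO_WORK when the queue is empty */
		/* process event.desc and event.msg_buf here */
	} while (pending);

	kfree(event.msg_buf);
}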