// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len  = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len  = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
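
/*
 * A note on the pre-posting above (a sketch of the contract as it reads
 * from this code, not from a datasheet): every ARQ descriptor is armed up
 * front with the DMA address and size of its buffer, e.g. for a 4096-byte
 * buffer at bi->pa:
 *
 *	desc->flags   = IAVF_AQ_FLAG_BUF (| IAVF_AQ_FLAG_LB for buffers
 *	                larger than IAVF_AQ_LARGE_BUF)
 *	desc->datalen = 4096
 *	desc->params.external.addr_high/addr_low = split 64-bit bi->pa
 *
 * Firmware overwrites datalen with the actual event size on write-back,
 * which is why iavf_clean_arq_element() must restore these fields before
 * re-arming an entry.
 */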

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
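
/*
 * Note on the tail write in iavf_config_arq_regs(): pointing tail at the
 * last descriptor makes the buffers pre-posted by iavf_alloc_arq_bufs()
 * available to firmware in one shot. After that, the ring is re-armed one
 * entry at a time by the tail bump at the end of iavf_clean_arq_element().
 */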

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not safe to run in atomic context
 **/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code)
		goto init_free_asq_bufs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_free_asq_bufs:
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not safe to run in atomic context
 **/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code)
		goto init_free_arq_bufs;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_free_arq_bufs:
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
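
/*
 * Usage sketch (illustrative, not part of this file): a driver fills the
 * four sizing fields named in the kernel-doc above before calling
 * iavf_init_adminq(). IAVF_AQ_LEN and IAVF_MAX_AQ_BUF_SIZE are the
 * constants the iavf driver conventionally uses for this; treat their use
 * here as an assumption of the example.
 *
 *	hw->aq.num_asq_entries = IAVF_AQ_LEN;
 *	hw->aq.num_arq_entries = IAVF_AQ_LEN;
 *	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *
 *	status = iavf_init_adminq(hw);
 *	if (status)
 *		goto err;	// nothing to unwind; init cleans up after itself
 */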

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);

	return ret_code;
}

/**
 *  iavf_clean_asq - cleans the Admin Send Queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors
 **/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &hw->aq.asq;
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
					(IAVF_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct iavf_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
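
/*
 * Worked example for the return value above, assuming IAVF_DESC_UNUSED()
 * uses the conventional one-slot-empty ring arithmetic,
 * (ntc > ntu ? 0 : count) + ntc - ntu - 1:
 *
 *	count = 32, next_to_use (ntu) = 10, next_to_clean (ntc) = 4
 *	  -> descriptors 4..9 still await firmware (6 in flight)
 *	  -> unused = 32 + 4 - 10 - 1 = 25
 *
 * The trailing "- 1" keeps one descriptor permanently unused so that a
 * completely full ring is distinguishable from an empty one.
 */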

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue. It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16  buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum iavf_status status = 0;
	u16  retval = 0;
	u32  val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct iavf_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = 0;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}
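
/*
 * Usage sketch (illustrative): a caller typically builds the descriptor
 * with iavf_fill_default_direct_cmd_desc() below and then submits it.
 * 'some_opcode' and the 16-byte payload are placeholders, not a real
 * command definition from this driver.
 *
 *	struct iavf_aq_desc desc;
 *	enum iavf_status status;
 *	u8 msg[16] = { 0 };
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, some_opcode);
 *	desc.flags |= cpu_to_le16(IAVF_AQ_FLAG_RD);	// driver-to-FW buffer
 *	status = iavf_asq_send_command(hw, &desc, msg, sizeof(msg), NULL);
 *	// on success, desc now holds the firmware write-back and
 *	// hw->aq.asq_last_status holds the AQ return code
 */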

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}

/**
 *  iavf_clean_arq_element - clean one element from the Admin Receive Queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	enum iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return; the ternary accounts
	 * for ring wraparound when ntc has wrapped past ntu
	 */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
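
/*
 * Usage sketch (illustrative): callers typically drain the ARQ in a loop,
 * using 'pending' to decide whether to go around again. The event buffer
 * is sized to match the arq_buf_size the queue was initialized with.
 *
 *	struct iavf_arq_event_info event;
 *	enum iavf_status ret;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		ret = iavf_clean_arq_element(hw, &event, &pending);
 *		if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
 *			break;
 *		// handle event.desc and event.msg_len bytes of event.msg_buf
 *	} while (pending);
 *	kfree(event.msg_buf);
 */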