1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/slab.h>
10 #include <linux/sched.h>
11 #include <net/ip6_checksum.h>
12 #include <net/tso.h>
13
14 #include "iwl-debug.h"
15 #include "iwl-csr.h"
16 #include "iwl-prph.h"
17 #include "iwl-io.h"
18 #include "iwl-scd.h"
19 #include "iwl-op-mode.h"
20 #include "internal.h"
21 #include "fw/api/tx.h"
22
23 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
24 * DMA services
25 *
26 * Theory of operation
27 *
28 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
29 * of buffer descriptors, each of which points to one or more data buffers for
30 * the device to read from or fill. Driver and device exchange status of each
31 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
32 * entries in each circular buffer, to protect against confusing empty and full
33 * queue states.
34 *
35 * The device reads or writes the data in the queues via the device's several
36 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
37 *
38 * For Tx queues, there are low mark and high mark limits. If, after queuing
39 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
40 * When reclaiming packets (on a 'tx done' IRQ), if the free space becomes
41 * > high mark, the Tx queue is resumed.
42 *
43 ***************************************************/
44
45
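/*
 * iwl_pcie_alloc_dma_ptr - allocate a coherent DMA buffer and describe it
 *
 * On success, fills @ptr with the CPU address, bus address and size of a
 * freshly allocated coherent buffer. Returns -EINVAL if @ptr already
 * holds an allocation, -ENOMEM if the allocation fails.
 */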
46 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
47 struct iwl_dma_ptr *ptr, size_t size)
48 {
49 if (WARN_ON(ptr->addr))
50 return -EINVAL;
51
52 ptr->addr = dma_alloc_coherent(trans->dev, size,
53 &ptr->dma, GFP_KERNEL);
54 if (!ptr->addr)
55 return -ENOMEM;
56 ptr->size = size;
57 return 0;
58 }
59
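/*
 * iwl_pcie_free_dma_ptr - free a buffer from iwl_pcie_alloc_dma_ptr and
 * clear the descriptor so it can be reused
 */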
60 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
61 {
62 if (unlikely(!ptr->addr))
63 return;
64
65 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
66 memset(ptr, 0, sizeof(*ptr));
67 }
68
69 /*
70 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
71 */
72 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
73 struct iwl_txq *txq)
74 {
75 u32 reg = 0;
76 int txq_id = txq->id;
77
78 lockdep_assert_held(&txq->lock);
79
80 /*
81 * explicitly wake up the NIC if:
82 * 1. shadow registers aren't enabled
83 * 2. NIC is woken up for CMD regardless of shadow outside this function
84 * 3. there is a chance that the NIC is asleep
85 */
86 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
87 txq_id != trans->txqs.cmd.q_id &&
88 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
89 /*
90 * wake up nic if it's powered down ...
91 * uCode will wake up, and interrupt us again, so next
92 * time we'll skip this part.
93 */
94 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
95
96 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
97 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
98 txq_id, reg);
99 iwl_set_bit(trans, CSR_GP_CNTRL,
100 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
101 txq->need_update = true;
102 return;
103 }
104 }
105
106 /*
107 * if not in power-save mode, uCode will never sleep when we're
108 * trying to tx (during RFKILL, we're not trying to tx).
109 */
110 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
111 if (!txq->block)
112 iwl_write32(trans, HBUS_TARG_WRPTR,
113 txq->write_ptr | (txq_id << 8));
114 }
115
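/*
 * iwl_pcie_txq_check_wrptrs - push out deferred write pointer updates
 * for all used queues (queues flagged with need_update above)
 */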
116 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
117 {
118 int i;
119
120 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
121 struct iwl_txq *txq = trans->txqs.txq[i];
122
123 if (!test_bit(i, trans->txqs.queue_used))
124 continue;
125
126 spin_lock_bh(&txq->lock);
127 if (txq->need_update) {
128 iwl_pcie_txq_inc_wr_ptr(trans, txq);
129 txq->need_update = false;
130 }
131 spin_unlock_bh(&txq->lock);
132 }
133 }
134
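/*
 * iwl_pcie_txq_build_tfd - append one TB (transfer buffer) to the TFD at
 * the queue's write pointer
 *
 * Returns the index of the new TB within the TFD, or -EINVAL if the TFD
 * already holds the maximum number of TBs or the address is unaligned.
 */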
135 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
136 dma_addr_t addr, u16 len, bool reset)
137 {
138 void *tfd;
139 u32 num_tbs;
140
141 tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
142
143 if (reset)
144 memset(tfd, 0, trans->txqs.tfd.size);
145
146 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
147
148 /* Each TFD can point to a maximum of max_tbs Tx buffers */
149 if (num_tbs >= trans->txqs.tfd.max_tbs) {
150 IWL_ERR(trans, "Error can not send more than %d chunks\n",
151 trans->txqs.tfd.max_tbs);
152 return -EINVAL;
153 }
154
155 if (WARN(addr & ~IWL_TX_DMA_MASK,
156 "Unaligned address = %llx\n", (unsigned long long)addr))
157 return -EINVAL;
158
159 iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len);
160
161 return num_tbs;
162 }
163
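/*
 * iwl_pcie_clear_cmd_in_flight - drop the MAC access request that kept
 * the NIC awake while host commands were in flight (only needed on
 * devices with the APMG wake-up workaround)
 */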
164 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
165 {
166 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
167
168 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
169 return;
170
171 spin_lock(&trans_pcie->reg_lock);
172
173 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
174 spin_unlock(&trans_pcie->reg_lock);
175 return;
176 }
177
178 trans_pcie->cmd_hold_nic_awake = false;
179 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
180 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
181 spin_unlock(&trans_pcie->reg_lock);
182 }
183
184 /*
185 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
186 */
187 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
188 {
189 struct iwl_txq *txq = trans->txqs.txq[txq_id];
190
191 if (!txq) {
192 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
193 return;
194 }
195
196 spin_lock_bh(&txq->lock);
197 while (txq->write_ptr != txq->read_ptr) {
198 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
199 txq_id, txq->read_ptr);
200
201 if (txq_id != trans->txqs.cmd.q_id) {
202 struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
203
204 if (WARN_ON_ONCE(!skb))
205 continue;
206
207 iwl_txq_free_tso_page(trans, skb);
208 }
209 iwl_txq_free_tfd(trans, txq);
210 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
211
212 if (txq->read_ptr == txq->write_ptr &&
213 txq_id == trans->txqs.cmd.q_id)
214 iwl_pcie_clear_cmd_in_flight(trans);
215 }
216
217 while (!skb_queue_empty(&txq->overflow_q)) {
218 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
219
220 iwl_op_mode_free_skb(trans->op_mode, skb);
221 }
222
223 spin_unlock_bh(&txq->lock);
224
225 /* just in case - this queue may have been stopped */
226 iwl_wake_queue(trans, txq);
227 }
228
229 /*
230 * iwl_pcie_txq_free - Deallocate DMA queue.
231 * @txq: Transmit queue to deallocate.
232 *
233 * Empty queue by removing and destroying all BD's.
234 * Free all buffers.
235 * 0-fill, but do not free "txq" descriptor structure.
236 */
237 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
238 {
239 struct iwl_txq *txq = trans->txqs.txq[txq_id];
240 struct device *dev = trans->dev;
241 int i;
242
243 if (WARN_ON(!txq))
244 return;
245
246 iwl_pcie_txq_unmap(trans, txq_id);
247
248 /* De-alloc array of command/tx buffers */
249 if (txq_id == trans->txqs.cmd.q_id)
250 for (i = 0; i < txq->n_window; i++) {
251 kfree_sensitive(txq->entries[i].cmd);
252 kfree_sensitive(txq->entries[i].free_buf);
253 }
254
255 /* De-alloc circular buffer of TFDs */
256 if (txq->tfds) {
257 dma_free_coherent(dev,
258 trans->txqs.tfd.size *
259 trans->trans_cfg->base_params->max_tfd_queue_size,
260 txq->tfds, txq->dma_addr);
261 txq->dma_addr = 0;
262 txq->tfds = NULL;
263
264 dma_free_coherent(dev,
265 sizeof(*txq->first_tb_bufs) * txq->n_window,
266 txq->first_tb_bufs, txq->first_tb_dma);
267 }
268
269 kfree(txq->entries);
270 txq->entries = NULL;
271
272 del_timer_sync(&txq->stuck_timer);
273
274 /* 0-fill queue descriptor structure */
275 memset(txq, 0, sizeof(*txq));
276 }
277
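/*
 * iwl_pcie_tx_start - bring up the Tx scheduler and DMA channels
 *
 * Clears the scheduler context in SRAM, points the scheduler at the
 * byte count tables, activates the command queue and enables all Tx
 * DMA/FIFO channels. @scd_base_addr may be 0 if the SCD SRAM base
 * address is not known to the caller.
 */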
278 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
279 {
280 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
281 int nq = trans->trans_cfg->base_params->num_of_queues;
282 int chan;
283 u32 reg_val;
284 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
285 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
286
287 /* make sure all queues are not stopped/used */
288 memset(trans->txqs.queue_stopped, 0,
289 sizeof(trans->txqs.queue_stopped));
290 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
291
292 trans_pcie->scd_base_addr =
293 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
294
295 WARN_ON(scd_base_addr != 0 &&
296 scd_base_addr != trans_pcie->scd_base_addr);
297
298 /* reset context data, TX status and translation data */
299 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
300 SCD_CONTEXT_MEM_LOWER_BOUND,
301 NULL, clear_dwords);
302
303 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
304 trans->txqs.scd_bc_tbls.dma >> 10);
305
306 /* The chain extension of the SCD doesn't work well. This feature is
307 * enabled by default by the HW, so we need to disable it manually.
308 */
309 if (trans->trans_cfg->base_params->scd_chain_ext_wa)
310 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
311
312 iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
313 trans->txqs.cmd.fifo,
314 trans->txqs.cmd.wdg_timeout);
315
316 /* Activate all Tx DMA/FIFO channels */
317 iwl_scd_activate_fifos(trans);
318
319 /* Enable DMA channel */
320 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
321 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
322 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
323 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
324
325 /* Update FH chicken bits */
326 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
327 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
328 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
329
330 /* Enable L1-Active */
331 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
332 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
333 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
334 }
335
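/*
 * iwl_trans_pcie_tx_reset - re-program Tx hardware after the device may
 * have been reset (e.g. when resuming from WoWLAN): restore each queue's
 * TFD circular buffer address, empty the queues and restart the scheduler
 */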
336 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
337 {
338 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
339 int txq_id;
340
341 /*
342 * we should never get here in gen2 trans mode; return early to avoid
343 * invalid accesses
344 */
345 if (WARN_ON_ONCE(trans->trans_cfg->gen2))
346 return;
347
348 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
349 txq_id++) {
350 struct iwl_txq *txq = trans->txqs.txq[txq_id];
351 if (trans->trans_cfg->gen2)
352 iwl_write_direct64(trans,
353 FH_MEM_CBBC_QUEUE(trans, txq_id),
354 txq->dma_addr);
355 else
356 iwl_write_direct32(trans,
357 FH_MEM_CBBC_QUEUE(trans, txq_id),
358 txq->dma_addr >> 8);
359 iwl_pcie_txq_unmap(trans, txq_id);
360 txq->read_ptr = 0;
361 txq->write_ptr = 0;
362 }
363
364 /* Tell NIC where to find the "keep warm" buffer */
365 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
366 trans_pcie->kw.dma >> 4);
367
368 /*
369 * Send 0 as the scd_base_addr since the device may have been reset
370 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
371 * contain garbage.
372 */
373 iwl_pcie_tx_start(trans, 0);
374 }
375
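/*
 * iwl_pcie_tx_stop_fh - stop all Tx DMA channels in the FH and poll
 * until they report idle
 */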
376 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
377 {
378 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
379 int ch, ret;
380 u32 mask = 0;
381
382 spin_lock_bh(&trans_pcie->irq_lock);
383
384 if (!iwl_trans_grab_nic_access(trans))
385 goto out;
386
387 /* Stop each Tx DMA channel */
388 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
389 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
390 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
391 }
392
393 /* Wait for DMA channels to be idle */
394 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
395 if (ret < 0)
396 IWL_ERR(trans,
397 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
398 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
399
400 iwl_trans_release_nic_access(trans);
401
402 out:
403 spin_unlock_bh(&trans_pcie->irq_lock);
404 }
405
406 /*
407 * iwl_pcie_tx_stop - Stop all Tx DMA channels
408 */
409 int iwl_pcie_tx_stop(struct iwl_trans *trans)
410 {
411 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
412 int txq_id;
413
414 /* Turn off all Tx DMA fifos */
415 iwl_scd_deactivate_fifos(trans);
416
417 /* Turn off all Tx DMA channels */
418 iwl_pcie_tx_stop_fh(trans);
419
420 /*
421 * This function can be called before the op_mode disabled the
422 * queues. This happens when we have an rfkill interrupt.
423 * Since we stop Tx altogether - mark the queues as stopped.
424 */
425 memset(trans->txqs.queue_stopped, 0,
426 sizeof(trans->txqs.queue_stopped));
427 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
428
429 /* This can happen: start_hw, stop_device */
430 if (!trans_pcie->txq_memory)
431 return 0;
432
433 /* Unmap DMA from host system and free skb's */
434 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
435 txq_id++)
436 iwl_pcie_txq_unmap(trans, txq_id);
437
438 return 0;
439 }
440
441 /*
442 * iwl_pcie_tx_free - Free TXQ Context
443 *
444 * Destroy all TX DMA queues and structures
445 */
446 void iwl_pcie_tx_free(struct iwl_trans *trans)
447 {
448 int txq_id;
449 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
450
451 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
452
453 /* Tx queues */
454 if (trans_pcie->txq_memory) {
455 for (txq_id = 0;
456 txq_id < trans->trans_cfg->base_params->num_of_queues;
457 txq_id++) {
458 iwl_pcie_txq_free(trans, txq_id);
459 trans->txqs.txq[txq_id] = NULL;
460 }
461 }
462
463 kfree(trans_pcie->txq_memory);
464 trans_pcie->txq_memory = NULL;
465
466 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
467
468 iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
469 }
470
471 /*
472 * iwl_pcie_tx_alloc - allocate TX context
473 * Allocate all Tx DMA structures and initialize them
474 */
475 static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
476 {
477 int ret;
478 int txq_id, slots_num;
479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
480 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
481
482 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
483 return -EINVAL;
484
485 bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
486
487 /* It is not allowed to alloc twice, so warn when this happens.
488 * We cannot rely on the previous allocation, so free and fail. */
489 if (WARN_ON(trans_pcie->txq_memory)) {
490 ret = -EINVAL;
491 goto error;
492 }
493
494 ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
495 bc_tbls_size);
496 if (ret) {
497 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
498 goto error;
499 }
500
501 /* Alloc keep-warm buffer */
502 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
503 if (ret) {
504 IWL_ERR(trans, "Keep Warm allocation failed\n");
505 goto error;
506 }
507
508 trans_pcie->txq_memory =
509 kcalloc(trans->trans_cfg->base_params->num_of_queues,
510 sizeof(struct iwl_txq), GFP_KERNEL);
511 if (!trans_pcie->txq_memory) {
512 IWL_ERR(trans, "Not enough memory for txq\n");
513 ret = -ENOMEM;
514 goto error;
515 }
516
517 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
518 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
519 txq_id++) {
520 bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
521
522 if (cmd_queue)
523 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
524 trans->cfg->min_txq_size);
525 else
526 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
527 trans->cfg->min_ba_txq_size);
528 trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
529 ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
530 cmd_queue);
531 if (ret) {
532 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
533 goto error;
534 }
535 trans->txqs.txq[txq_id]->id = txq_id;
536 }
537
538 return 0;
539
540 error:
541 iwl_pcie_tx_free(trans);
542
543 return ret;
544 }
545
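/*
 * iwl_pcie_tx_init - allocate (on first use) and initialize all Tx queues,
 * and tell the device where each queue's TFD circular buffer and the
 * keep-warm buffer are located
 */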
546 int iwl_pcie_tx_init(struct iwl_trans *trans)
547 {
548 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
549 int ret;
550 int txq_id, slots_num;
551 bool alloc = false;
552
553 if (!trans_pcie->txq_memory) {
554 ret = iwl_pcie_tx_alloc(trans);
555 if (ret)
556 goto error;
557 alloc = true;
558 }
559
560 spin_lock_bh(&trans_pcie->irq_lock);
561
562 /* Turn off all Tx DMA fifos */
563 iwl_scd_deactivate_fifos(trans);
564
565 /* Tell NIC where to find the "keep warm" buffer */
566 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
567 trans_pcie->kw.dma >> 4);
568
569 spin_unlock_bh(&trans_pcie->irq_lock);
570
571 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
572 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
573 txq_id++) {
574 bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
575
576 if (cmd_queue)
577 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
578 trans->cfg->min_txq_size);
579 else
580 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
581 trans->cfg->min_ba_txq_size);
582 ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
583 cmd_queue);
584 if (ret) {
585 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
586 goto error;
587 }
588
589 /*
590 * Tell nic where to find circular buffer of TFDs for a
591 * given Tx queue, and enable the DMA channel used for that
592 * queue.
593 * Circular buffer (TFD queue in DRAM) physical base address
594 */
595 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
596 trans->txqs.txq[txq_id]->dma_addr >> 8);
597 }
598
599 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
600 if (trans->trans_cfg->base_params->num_of_queues > 20)
601 iwl_set_bits_prph(trans, SCD_GP_CTRL,
602 SCD_GP_CTRL_ENABLE_31_QUEUES);
603
604 return 0;
605 error:
606 /* Upon error, free only if we allocated something */
607 if (alloc)
608 iwl_pcie_tx_free(trans);
609 return ret;
610 }
611
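/*
 * iwl_pcie_set_cmd_in_flight - on devices that need the APMG wake-up
 * workaround, grab NIC access so the device stays awake until all host
 * commands have completed
 */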
612 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
613 const struct iwl_host_cmd *cmd)
614 {
615 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
616
617 /* Make sure the NIC is still alive in the bus */
618 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
619 return -ENODEV;
620
621 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
622 return 0;
623
624 /*
625 * wake up the NIC to make sure that the firmware will see the host
626 * command - we will let the NIC sleep once all the host commands
627 * returned. This needs to be done only on NICs that have
628 * apmg_wake_up_wa set (see above.)
629 */
630 if (!_iwl_trans_pcie_grab_nic_access(trans))
631 return -EIO;
632
633 /*
634 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
635 * There, we also returned immediately if cmd_hold_nic_awake is
636 * already true, so it's OK to unconditionally set it to true.
637 */
638 trans_pcie->cmd_hold_nic_awake = true;
639 spin_unlock(&trans_pcie->reg_lock);
640
641 return 0;
642 }
643
644 /*
645 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
646 *
647 * When FW advances 'R' index, all entries between old and new 'R' index
648 * need to be reclaimed. As a result, some free space forms. If there is
649 * enough free space (> low mark), wake the stack that feeds us.
650 */
651 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
652 {
653 struct iwl_txq *txq = trans->txqs.txq[txq_id];
654 int nfreed = 0;
655 u16 r;
656
657 lockdep_assert_held(&txq->lock);
658
659 idx = iwl_txq_get_cmd_index(txq, idx);
660 r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
661
662 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
663 (!iwl_txq_used(txq, idx))) {
664 WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
665 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
666 __func__, txq_id, idx,
667 trans->trans_cfg->base_params->max_tfd_queue_size,
668 txq->write_ptr, txq->read_ptr);
669 return;
670 }
671
672 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
673 r = iwl_txq_inc_wrap(trans, r)) {
674 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
675
676 if (nfreed++ > 0) {
677 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
678 idx, txq->write_ptr, r);
679 iwl_force_nmi(trans);
680 }
681 }
682
683 if (txq->read_ptr == txq->write_ptr)
684 iwl_pcie_clear_cmd_in_flight(trans);
685
686 iwl_txq_progress(txq);
687 }
688
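/*
 * iwl_pcie_txq_set_ratid_map - write the RA/TID to queue mapping into the
 * scheduler's translation table in SRAM (two 16-bit entries per dword,
 * selected by the queue number's lowest bit)
 */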
689 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
690 u16 txq_id)
691 {
692 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
693 u32 tbl_dw_addr;
694 u32 tbl_dw;
695 u16 scd_q2ratid;
696
697 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
698
699 tbl_dw_addr = trans_pcie->scd_base_addr +
700 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
701
702 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
703
704 if (txq_id & 0x1)
705 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
706 else
707 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
708
709 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
710
711 return 0;
712 }
713
714 /* Receiver address (actually, Rx station's index into station table),
715 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
716 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
717
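/*
 * iwl_trans_pcie_txq_enable - configure and activate a Tx queue in the
 * scheduler
 *
 * Returns true if the SCD write pointer workaround forced @ssn to be
 * incremented, so that the op_mode can take this into account.
 */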
718 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
719 const struct iwl_trans_txq_scd_cfg *cfg,
720 unsigned int wdg_timeout)
721 {
722 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
723 struct iwl_txq *txq = trans->txqs.txq[txq_id];
724 int fifo = -1;
725 bool scd_bug = false;
726
727 if (test_and_set_bit(txq_id, trans->txqs.queue_used))
728 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
729
730 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
731
732 if (cfg) {
733 fifo = cfg->fifo;
734
735 /* Disable the scheduler prior configuring the cmd queue */
736 if (txq_id == trans->txqs.cmd.q_id &&
737 trans_pcie->scd_set_active)
738 iwl_scd_enable_set_active(trans, 0);
739
740 /* Stop this Tx queue before configuring it */
741 iwl_scd_txq_set_inactive(trans, txq_id);
742
743 /* Set this queue as a chain-building queue unless it is CMD */
744 if (txq_id != trans->txqs.cmd.q_id)
745 iwl_scd_txq_set_chain(trans, txq_id);
746
747 if (cfg->aggregate) {
748 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
749
750 /* Map receiver-address / traffic-ID to this queue */
751 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
752
753 /* enable aggregations for the queue */
754 iwl_scd_txq_enable_agg(trans, txq_id);
755 txq->ampdu = true;
756 } else {
757 /*
758 * disable aggregations for the queue, this will also
759 * make the ra_tid mapping configuration irrelevant
760 * since it is now a non-AGG queue.
761 */
762 iwl_scd_txq_disable_agg(trans, txq_id);
763
764 ssn = txq->read_ptr;
765 }
766 } else {
767 /*
768 * If we need to move the SCD write pointer by steps of
769 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
770 * the op_mode know by returning true later.
771 * Do this only in case cfg is NULL since this trick can
772 * be done only if we have DQA enabled which is true for mvm
773 * only. And mvm never sets a cfg pointer.
774 * This is really ugly, but this is the easiest way out for
775 * this sad hardware issue.
776 * This bug has been fixed on devices 9000 and up.
777 */
778 scd_bug = !trans->trans_cfg->mq_rx_supported &&
779 !((ssn - txq->write_ptr) & 0x3f) &&
780 (ssn != txq->write_ptr);
781 if (scd_bug)
782 ssn++;
783 }
784
785 /* Place first TFD at index corresponding to start sequence number.
786 * Assumes that ssn_idx is valid (!= 0xFFF) */
787 txq->read_ptr = (ssn & 0xff);
788 txq->write_ptr = (ssn & 0xff);
789 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
790 (ssn & 0xff) | (txq_id << 8));
791
792 if (cfg) {
793 u8 frame_limit = cfg->frame_limit;
794
795 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
796
797 /* Set up Tx window size and frame limit for this queue */
798 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
799 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
800 iwl_trans_write_mem32(trans,
801 trans_pcie->scd_base_addr +
802 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
803 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
804 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
805
806 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
807 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
808 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
809 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
810 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
811 SCD_QUEUE_STTS_REG_MSK);
812
813 /* enable the scheduler for this queue (only) */
814 if (txq_id == trans->txqs.cmd.q_id &&
815 trans_pcie->scd_set_active)
816 iwl_scd_enable_set_active(trans, BIT(txq_id));
817
818 IWL_DEBUG_TX_QUEUES(trans,
819 "Activate queue %d on FIFO %d WrPtr: %d\n",
820 txq_id, fifo, ssn & 0xff);
821 } else {
822 IWL_DEBUG_TX_QUEUES(trans,
823 "Activate queue %d WrPtr: %d\n",
824 txq_id, ssn & 0xff);
825 }
826
827 return scd_bug;
828 }
829
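/* a queue used in shared mode cannot do A-MPDU; track that in txq->ampdu */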
830 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
831 bool shared_mode)
832 {
833 struct iwl_txq *txq = trans->txqs.txq[txq_id];
834
835 txq->ampdu = !shared_mode;
836 }
837
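/*
 * iwl_trans_pcie_txq_disable - deactivate a Tx queue: optionally
 * de-configure it in the scheduler, then unmap and free any frames
 * still queued on it
 */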
838 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
839 bool configure_scd)
840 {
841 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
842 u32 stts_addr = trans_pcie->scd_base_addr +
843 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
844 static const u32 zero_val[4] = {};
845
846 trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
847 trans->txqs.txq[txq_id]->frozen = false;
848
849 /*
850 * Upon HW Rfkill - we stop the device, and then stop the queues
851 * in the op_mode. Just for the sake of the simplicity of the op_mode,
852 * allow the op_mode to call txq_disable after it already called
853 * stop_device.
854 */
855 if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
856 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
857 "queue %d not used", txq_id);
858 return;
859 }
860
861 if (configure_scd) {
862 iwl_scd_txq_set_inactive(trans, txq_id);
863
864 iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
865 ARRAY_SIZE(zero_val));
866 }
867
868 iwl_pcie_txq_unmap(trans, txq_id);
869 trans->txqs.txq[txq_id]->ampdu = false;
870
871 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
872 }
873
874 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
875
876 /*
877 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
878 * @trans: transport private data
879 * @cmd: a pointer to the ucode command structure
880 *
881 * The function returns < 0 values to indicate the operation
882 * failed. On success, it returns the index (>= 0) of command in the
883 * command queue.
884 */
885 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
886 struct iwl_host_cmd *cmd)
887 {
888 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
889 struct iwl_device_cmd *out_cmd;
890 struct iwl_cmd_meta *out_meta;
891 void *dup_buf = NULL;
892 dma_addr_t phys_addr;
893 int idx;
894 u16 copy_size, cmd_size, tb0_size;
895 bool had_nocopy = false;
896 u8 group_id = iwl_cmd_groupid(cmd->id);
897 int i, ret;
898 u32 cmd_pos;
899 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
900 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
901 unsigned long flags;
902
903 if (WARN(!trans->wide_cmd_header &&
904 group_id > IWL_ALWAYS_LONG_GROUP,
905 "unsupported wide command %#x\n", cmd->id))
906 return -EINVAL;
907
908 if (group_id != 0) {
909 copy_size = sizeof(struct iwl_cmd_header_wide);
910 cmd_size = sizeof(struct iwl_cmd_header_wide);
911 } else {
912 copy_size = sizeof(struct iwl_cmd_header);
913 cmd_size = sizeof(struct iwl_cmd_header);
914 }
915
916 /* need one for the header if the first is NOCOPY */
917 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
918
919 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
920 cmddata[i] = cmd->data[i];
921 cmdlen[i] = cmd->len[i];
922
923 if (!cmd->len[i])
924 continue;
925
926 /* need at least IWL_FIRST_TB_SIZE copied */
927 if (copy_size < IWL_FIRST_TB_SIZE) {
928 int copy = IWL_FIRST_TB_SIZE - copy_size;
929
930 if (copy > cmdlen[i])
931 copy = cmdlen[i];
932 cmdlen[i] -= copy;
933 cmddata[i] += copy;
934 copy_size += copy;
935 }
936
937 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
938 had_nocopy = true;
939 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
940 idx = -EINVAL;
941 goto free_dup_buf;
942 }
943 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
944 /*
945 * This is also a chunk that isn't copied
946 * to the static buffer so set had_nocopy.
947 */
948 had_nocopy = true;
949
950 /* only allowed once */
951 if (WARN_ON(dup_buf)) {
952 idx = -EINVAL;
953 goto free_dup_buf;
954 }
955
956 dup_buf = kmemdup(cmddata[i], cmdlen[i],
957 GFP_ATOMIC);
958 if (!dup_buf)
959 return -ENOMEM;
960 } else {
961 /* NOCOPY must not be followed by normal! */
962 if (WARN_ON(had_nocopy)) {
963 idx = -EINVAL;
964 goto free_dup_buf;
965 }
966 copy_size += cmdlen[i];
967 }
968 cmd_size += cmd->len[i];
969 }
970
971 /*
972 * If any of the command structures end up being larger than
973 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
974 * allocated into separate TFDs, then we will need to
975 * increase the size of the buffers.
976 */
977 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
978 "Command %s (%#x) is too large (%d bytes)\n",
979 iwl_get_cmd_string(trans, cmd->id),
980 cmd->id, copy_size)) {
981 idx = -EINVAL;
982 goto free_dup_buf;
983 }
984
985 spin_lock_irqsave(&txq->lock, flags);
986
987 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
988 spin_unlock_irqrestore(&txq->lock, flags);
989
990 IWL_ERR(trans, "No space in command queue\n");
991 iwl_op_mode_cmd_queue_full(trans->op_mode);
992 idx = -ENOSPC;
993 goto free_dup_buf;
994 }
995
996 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
997 out_cmd = txq->entries[idx].cmd;
998 out_meta = &txq->entries[idx].meta;
999
1000 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
1001 if (cmd->flags & CMD_WANT_SKB)
1002 out_meta->source = cmd;
1003
1004 /* set up the header */
1005 if (group_id != 0) {
1006 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1007 out_cmd->hdr_wide.group_id = group_id;
1008 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1009 out_cmd->hdr_wide.length =
1010 cpu_to_le16(cmd_size -
1011 sizeof(struct iwl_cmd_header_wide));
1012 out_cmd->hdr_wide.reserved = 0;
1013 out_cmd->hdr_wide.sequence =
1014 cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1015 INDEX_TO_SEQ(txq->write_ptr));
1016
1017 cmd_pos = sizeof(struct iwl_cmd_header_wide);
1018 copy_size = sizeof(struct iwl_cmd_header_wide);
1019 } else {
1020 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1021 out_cmd->hdr.sequence =
1022 cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1023 INDEX_TO_SEQ(txq->write_ptr));
1024 out_cmd->hdr.group_id = 0;
1025
1026 cmd_pos = sizeof(struct iwl_cmd_header);
1027 copy_size = sizeof(struct iwl_cmd_header);
1028 }
1029
1030 /* and copy the data that needs to be copied */
1031 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1032 int copy;
1033
1034 if (!cmd->len[i])
1035 continue;
1036
1037 /* copy everything if not nocopy/dup */
1038 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1039 IWL_HCMD_DFL_DUP))) {
1040 copy = cmd->len[i];
1041
1042 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1043 cmd_pos += copy;
1044 copy_size += copy;
1045 continue;
1046 }
1047
1048 /*
1049 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
1050 * in total (for bi-directional DMA), but copy up to what
1051 * we can fit into the payload for debug dump purposes.
1052 */
1053 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1054
1055 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1056 cmd_pos += copy;
1057
1058 /* However, treat copy_size the proper way, we need it below */
1059 if (copy_size < IWL_FIRST_TB_SIZE) {
1060 copy = IWL_FIRST_TB_SIZE - copy_size;
1061
1062 if (copy > cmd->len[i])
1063 copy = cmd->len[i];
1064 copy_size += copy;
1065 }
1066 }
1067
1068 IWL_DEBUG_HC(trans,
1069 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1070 iwl_get_cmd_string(trans, cmd->id),
1071 group_id, out_cmd->hdr.cmd,
1072 le16_to_cpu(out_cmd->hdr.sequence),
1073 cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
1074
1075 /* start the TFD with the minimum copy bytes */
1076 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
1077 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1078 iwl_pcie_txq_build_tfd(trans, txq,
1079 iwl_txq_get_first_tb_dma(txq, idx),
1080 tb0_size, true);
1081
1082 /* map first command fragment, if any remains */
1083 if (copy_size > tb0_size) {
1084 phys_addr = dma_map_single(trans->dev,
1085 ((u8 *)&out_cmd->hdr) + tb0_size,
1086 copy_size - tb0_size,
1087 DMA_TO_DEVICE);
1088 if (dma_mapping_error(trans->dev, phys_addr)) {
1089 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1090 txq->write_ptr);
1091 idx = -ENOMEM;
1092 goto out;
1093 }
1094
1095 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1096 copy_size - tb0_size, false);
1097 }
1098
1099 /* map the remaining (adjusted) nocopy/dup fragments */
1100 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1101 void *data = (void *)(uintptr_t)cmddata[i];
1102
1103 if (!cmdlen[i])
1104 continue;
1105 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1106 IWL_HCMD_DFL_DUP)))
1107 continue;
1108 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1109 data = dup_buf;
1110 phys_addr = dma_map_single(trans->dev, data,
1111 cmdlen[i], DMA_TO_DEVICE);
1112 if (dma_mapping_error(trans->dev, phys_addr)) {
1113 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1114 txq->write_ptr);
1115 idx = -ENOMEM;
1116 goto out;
1117 }
1118
1119 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1120 }
1121
1122 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1123 out_meta->flags = cmd->flags;
1124 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1125 kfree_sensitive(txq->entries[idx].free_buf);
1126 txq->entries[idx].free_buf = dup_buf;
1127
1128 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1129
1130 /* start timer if queue currently empty */
1131 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1132 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1133
1134 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1135 if (ret < 0) {
1136 idx = ret;
1137 goto out;
1138 }
1139
1140 /* Increment and update queue's write index */
1141 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1142 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1143
1144 out:
1145 spin_unlock_irqrestore(&txq->lock, flags);
1146 free_dup_buf:
1147 if (idx < 0)
1148 kfree(dup_buf);
1149 return idx;
1150 }
1151
1152 /*
1153 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1154 * @rxb: Rx buffer to reclaim
1155 */
1156 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1157 struct iwl_rx_cmd_buffer *rxb)
1158 {
1159 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1160 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1161 u8 group_id;
1162 u32 cmd_id;
1163 int txq_id = SEQ_TO_QUEUE(sequence);
1164 int index = SEQ_TO_INDEX(sequence);
1165 int cmd_index;
1166 struct iwl_device_cmd *cmd;
1167 struct iwl_cmd_meta *meta;
1168 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1169 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1170
1171 /* If a Tx command is being handled and it isn't in the actual
1172 * command queue then a command routing bug has been introduced
1173 * in the queue management code. */
1174 if (WARN(txq_id != trans->txqs.cmd.q_id,
1175 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1176 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
1177 txq->write_ptr)) {
1178 iwl_print_hex_error(trans, pkt, 32);
1179 return;
1180 }
1181
1182 spin_lock_bh(&txq->lock);
1183
1184 cmd_index = iwl_txq_get_cmd_index(txq, index);
1185 cmd = txq->entries[cmd_index].cmd;
1186 meta = &txq->entries[cmd_index].meta;
1187 group_id = cmd->hdr.group_id;
1188 cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1189
1190 if (trans->trans_cfg->gen2)
1191 iwl_txq_gen2_tfd_unmap(trans, meta,
1192 iwl_txq_get_tfd(trans, txq, index));
1193 else
1194 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1195
1196 /* Input error checking is done when commands are added to queue. */
1197 if (meta->flags & CMD_WANT_SKB) {
1198 struct page *p = rxb_steal_page(rxb);
1199
1200 meta->source->resp_pkt = pkt;
1201 meta->source->_rx_page_addr = (unsigned long)page_address(p);
1202 meta->source->_rx_page_order = trans_pcie->rx_page_order;
1203 }
1204
1205 if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
1206 iwl_op_mode_async_cb(trans->op_mode, cmd);
1207
1208 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1209
1210 if (!(meta->flags & CMD_ASYNC)) {
1211 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1212 IWL_WARN(trans,
1213 "HCMD_ACTIVE already clear for command %s\n",
1214 iwl_get_cmd_string(trans, cmd_id));
1215 }
1216 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1217 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1218 iwl_get_cmd_string(trans, cmd_id));
1219 wake_up(&trans->wait_command_queue);
1220 }
1221
1222 meta->flags = 0;
1223
1224 spin_unlock_bh(&txq->lock);
1225 }
1226
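/*
 * iwl_fill_data_tbs - map the skb head (past the 802.11 header) and every
 * page fragment, adding one TB to the current TFD per mapping
 */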
1227 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1228 struct iwl_txq *txq, u8 hdr_len,
1229 struct iwl_cmd_meta *out_meta)
1230 {
1231 u16 head_tb_len;
1232 int i;
1233
1234 /*
1235 * Set up TFD's third entry to point directly to remainder
1236 * of skb's head, if any
1237 */
1238 head_tb_len = skb_headlen(skb) - hdr_len;
1239
1240 if (head_tb_len > 0) {
1241 dma_addr_t tb_phys = dma_map_single(trans->dev,
1242 skb->data + hdr_len,
1243 head_tb_len, DMA_TO_DEVICE);
1244 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1245 return -EINVAL;
1246 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1247 tb_phys, head_tb_len);
1248 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
1249 }
1250
1251 /* set up the remaining entries to point to the data */
1252 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1253 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1254 dma_addr_t tb_phys;
1255 int tb_idx;
1256
1257 if (!skb_frag_size(frag))
1258 continue;
1259
1260 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1261 skb_frag_size(frag), DMA_TO_DEVICE);
1262
1263 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1264 return -EINVAL;
1265 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1266 tb_phys, skb_frag_size(frag));
1267 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1268 skb_frag_size(frag), false);
1269 if (tb_idx < 0)
1270 return tb_idx;
1271
1272 out_meta->tbs |= BIT(tb_idx);
1273 }
1274
1275 return 0;
1276 }
1277
1278 #ifdef CONFIG_INET
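/*
 * iwl_fill_data_tbs_amsdu - use the TSO core to split the skb into A-MSDU
 * subframes, building the subframe headers in a dedicated header page and
 * adding header and payload TBs for each subframe
 */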
1279 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1280 struct iwl_txq *txq, u8 hdr_len,
1281 struct iwl_cmd_meta *out_meta,
1282 struct iwl_device_tx_cmd *dev_cmd,
1283 u16 tb1_len)
1284 {
1285 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1286 struct ieee80211_hdr *hdr = (void *)skb->data;
1287 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
1288 unsigned int mss = skb_shinfo(skb)->gso_size;
1289 u16 length, iv_len, amsdu_pad;
1290 u8 *start_hdr;
1291 struct iwl_tso_hdr_page *hdr_page;
1292 struct tso_t tso;
1293
1294 /* if the packet is protected, then it must be CCMP or GCMP */
1295 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
1296 iv_len = ieee80211_has_protected(hdr->frame_control) ?
1297 IEEE80211_CCMP_HDR_LEN : 0;
1298
1299 trace_iwlwifi_dev_tx(trans->dev, skb,
1300 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1301 trans->txqs.tfd.size,
1302 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
1303
1304 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
1305 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
1306 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
1307 amsdu_pad = 0;
1308
1309 /* total amount of header we may need for this A-MSDU */
1310 hdr_room = DIV_ROUND_UP(total_len, mss) *
1311 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
1312
1313 /* Our device supports 9 segments at most, so it will fit in one page */
1314 hdr_page = get_page_hdr(trans, hdr_room, skb);
1315 if (!hdr_page)
1316 return -ENOMEM;
1317
1318 start_hdr = hdr_page->pos;
1319 memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
1320 hdr_page->pos += iv_len;
1321
1322 /*
1323 * Pull the ieee80211 header + IV to be able to use TSO core,
1324 * we will restore it for the tx_status flow.
1325 */
1326 skb_pull(skb, hdr_len + iv_len);
1327
1328 /*
1329 * Remove the length of all the headers that we don't actually
1330 * have in the MPDU by themselves, but that we duplicate into
1331 * all the different MSDUs inside the A-MSDU.
1332 */
1333 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
1334
1335 tso_start(skb, &tso);
1336
1337 while (total_len) {
1338 /* this is the data left for this subframe */
1339 unsigned int data_left =
1340 min_t(unsigned int, mss, total_len);
1341 unsigned int hdr_tb_len;
1342 dma_addr_t hdr_tb_phys;
1343 u8 *subf_hdrs_start = hdr_page->pos;
1344
1345 total_len -= data_left;
1346
1347 memset(hdr_page->pos, 0, amsdu_pad);
1348 hdr_page->pos += amsdu_pad;
1349 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
1350 data_left)) & 0x3;
1351 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
1352 hdr_page->pos += ETH_ALEN;
1353 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
1354 hdr_page->pos += ETH_ALEN;
1355
1356 length = snap_ip_tcp_hdrlen + data_left;
1357 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
1358 hdr_page->pos += sizeof(length);
1359
1360 /*
1361 * This will copy the SNAP as well, which will be considered
1362 * part of the MAC header.
1363 */
1364 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
1365
1366 hdr_page->pos += snap_ip_tcp_hdrlen;
1367
1368 hdr_tb_len = hdr_page->pos - start_hdr;
1369 hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
1370 hdr_tb_len, DMA_TO_DEVICE);
1371 if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
1372 return -EINVAL;
1373 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
1374 hdr_tb_len, false);
1375 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
1376 hdr_tb_phys, hdr_tb_len);
1377 /* add this subframe's headers' length to the tx_cmd */
1378 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
1379
1380 /* prepare the start_hdr for the next subframe */
1381 start_hdr = hdr_page->pos;
1382
1383 /* put the payload */
1384 while (data_left) {
1385 unsigned int size = min_t(unsigned int, tso.size,
1386 data_left);
1387 dma_addr_t tb_phys;
1388
1389 tb_phys = dma_map_single(trans->dev, tso.data,
1390 size, DMA_TO_DEVICE);
1391 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1392 return -EINVAL;
1393
1394 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1395 size, false);
1396 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
1397 tb_phys, size);
1398
1399 data_left -= size;
1400 tso_build_data(skb, &tso, size);
1401 }
1402 }
1403
1404 /* re-add the WiFi header and IV */
1405 skb_push(skb, hdr_len + iv_len);
1406
1407 return 0;
1408 }
1409 #else /* CONFIG_INET */
1410 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1411 struct iwl_txq *txq, u8 hdr_len,
1412 struct iwl_cmd_meta *out_meta,
1413 struct iwl_device_tx_cmd *dev_cmd,
1414 u16 tb1_len)
1415 {
1416 /* No A-MSDU without CONFIG_INET */
1417 WARN_ON(1);
1418
1419 return -1;
1420 }
1421 #endif /* CONFIG_INET */
1422
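/*
 * iwl_trans_pcie_tx - queue one frame for transmission on the given Tx queue
 *
 * Builds the TFD: TB0 holds the start of the Tx command (copied in at the
 * end), TB1 the rest of the command plus the 802.11 header, and further TBs
 * point to the frame payload (via the A-MSDU path when applicable).
 * Returns 0 once the frame is queued; if the ring is almost full the frame
 * is placed on the overflow queue instead.
 */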
1423 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1424 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
1425 {
1426 struct ieee80211_hdr *hdr;
1427 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
1428 struct iwl_cmd_meta *out_meta;
1429 struct iwl_txq *txq;
1430 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1431 void *tb1_addr;
1432 void *tfd;
1433 u16 len, tb1_len;
1434 bool wait_write_ptr;
1435 __le16 fc;
1436 u8 hdr_len;
1437 u16 wifi_seq;
1438 bool amsdu;
1439
1440 txq = trans->txqs.txq[txq_id];
1441
1442 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
1443 "TX on unused queue %d\n", txq_id))
1444 return -EINVAL;
1445
1446 if (skb_is_nonlinear(skb) &&
1447 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
1448 __skb_linearize(skb))
1449 return -ENOMEM;
1450
1451 /* mac80211 always puts the full header into the SKB's head,
1452 * so there's no need to check if it's readable there
1453 */
1454 hdr = (struct ieee80211_hdr *)skb->data;
1455 fc = hdr->frame_control;
1456 hdr_len = ieee80211_hdrlen(fc);
1457
1458 spin_lock(&txq->lock);
1459
1460 if (iwl_txq_space(trans, txq) < txq->high_mark) {
1461 iwl_txq_stop(trans, txq);
1462
1463 /* don't put the packet on the ring, if there is no room */
1464 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
1465 struct iwl_device_tx_cmd **dev_cmd_ptr;
1466
1467 dev_cmd_ptr = (void *)((u8 *)skb->cb +
1468 trans->txqs.dev_cmd_offs);
1469
1470 *dev_cmd_ptr = dev_cmd;
1471 __skb_queue_tail(&txq->overflow_q, skb);
1472
1473 spin_unlock(&txq->lock);
1474 return 0;
1475 }
1476 }
1477
1478 /* In AGG mode, the index in the ring must correspond to the WiFi
1479 * sequence number. This is a HW requirement to help the SCD parse
1480 * the BA.
1481 * Check here that the packets are in the right place on the ring.
1482 */
1483 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1484 WARN_ONCE(txq->ampdu &&
1485 (wifi_seq & 0xff) != txq->write_ptr,
1486 "Q: %d WiFi Seq %d tfdNum %d",
1487 txq_id, wifi_seq, txq->write_ptr);
1488
1489 /* Set up driver data for this TFD */
1490 txq->entries[txq->write_ptr].skb = skb;
1491 txq->entries[txq->write_ptr].cmd = dev_cmd;
1492
1493 dev_cmd->hdr.sequence =
1494 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1495 INDEX_TO_SEQ(txq->write_ptr)));
1496
1497 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
1498 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
1499 offsetof(struct iwl_tx_cmd, scratch);
1500
1501 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1502 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1503
1504 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1505 out_meta = &txq->entries[txq->write_ptr].meta;
1506 out_meta->flags = 0;
1507
1508 /*
1509 * The second TB (tb1) points to the remainder of the TX command
1510 * and the 802.11 header - dword aligned size
1511 * (This calculation modifies the TX command, so do it before the
1512 * setup of the first TB)
1513 */
1514 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
1515 hdr_len - IWL_FIRST_TB_SIZE;
1516 /* do not align A-MSDU to dword as the subframe header aligns it */
1517 amsdu = ieee80211_is_data_qos(fc) &&
1518 (*ieee80211_get_qos_ctl(hdr) &
1519 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
1520 if (!amsdu) {
1521 tb1_len = ALIGN(len, 4);
1522 /* Tell NIC about any 2-byte padding after MAC header */
1523 if (tb1_len != len)
1524 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
1525 } else {
1526 tb1_len = len;
1527 }
1528
1529 /*
1530 * The first TB points to bi-directional DMA data, we'll
1531 * memcpy the data into it later.
1532 */
1533 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1534 IWL_FIRST_TB_SIZE, true);
1535
1536 /* there must be data left over for TB1 or this code must be changed */
1537 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
1538 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
1539 offsetofend(struct iwl_tx_cmd, scratch) >
1540 IWL_FIRST_TB_SIZE);
1541
1542 /* map the data for TB1 */
1543 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
1544 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1545 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1546 goto out_err;
1547 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
1548
1549 trace_iwlwifi_dev_tx(trans->dev, skb,
1550 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1551 trans->txqs.tfd.size,
1552 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
1553 hdr_len);
1554
1555 /*
1556 * If gso_size wasn't set, don't give the frame "amsdu treatment"
1557 * (adding subframes, etc.).
1558 * This can happen in some testing flows when the amsdu was already
1559 * pre-built, and we just need to send the resulting skb.
1560 */
1561 if (amsdu && skb_shinfo(skb)->gso_size) {
1562 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
1563 out_meta, dev_cmd,
1564 tb1_len)))
1565 goto out_err;
1566 } else {
1567 struct sk_buff *frag;
1568
1569 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
1570 out_meta)))
1571 goto out_err;
1572
1573 skb_walk_frags(skb, frag) {
1574 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
1575 out_meta)))
1576 goto out_err;
1577 }
1578 }
1579
1580 /* building the A-MSDU might have changed this data, so memcpy it now */
1581 memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
1582
1583 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
1584 /* Set up entry for this TFD in Tx byte-count array */
1585 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
1586 iwl_txq_gen1_tfd_get_num_tbs(trans,
1587 tfd));
1588
1589 wait_write_ptr = ieee80211_has_morefrags(fc);
1590
1591 /* start timer if queue currently empty */
1592 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
1593 /*
1594 * If the TXQ is active, then set the timer, if not,
1595 * set the timer in remainder so that the timer will
1596 * be armed with the right value when the station will
1597 * wake up.
1598 */
1599 if (!txq->frozen)
1600 mod_timer(&txq->stuck_timer,
1601 jiffies + txq->wd_timeout);
1602 else
1603 txq->frozen_expiry_remainder = txq->wd_timeout;
1604 }
1605
1606 /* Tell device the write index *just past* this latest filled TFD */
1607 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1608 if (!wait_write_ptr)
1609 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1610
1611 /*
1612 * At this point the frame is "transmitted" successfully
1613 * and we will get a TX status notification eventually.
1614 */
1615 spin_unlock(&txq->lock);
1616 return 0;
1617 out_err:
1618 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
1619 spin_unlock(&txq->lock);
1620 return -1;
1621 }
1622