/*
 * Intel Wireless Multicomm 3200 WiFi driver
 *
 * Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <ilw@linux.intel.com>
 * Samuel Ortiz <samuel.ortiz@intel.com>
 * Zhu Yi <yi.zhu@intel.com>
 *
 */

/*
 * iwm Tx theory of operation:
 *
 * 1) We receive an 802.3 frame from the stack
 * 2) We convert it to an 802.11 frame [iwm_xmit_frame]
 * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
 * 4) We schedule the tx worker. There is one worker per tx
 *    queue. [iwm_xmit_frame]
 * 5) The tx worker is scheduled
 * 6) We go through every queued skb on the tx queue, and for each
 *    and every one of them: [iwm_tx_worker]
 *    a) We check if we have enough Tx credits (see below for a Tx
 *       credits description) for the frame length. [iwm_tx_worker]
 *    b) If we do, we aggregate the Tx frame into a UDMA one, by
 *       concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
 *    c) When we run out of credits, or when we reach the maximum
 *       concatenation size, we actually send the concatenated UDMA
 *       frame. [iwm_tx_worker]
 *
 * When we run out of Tx credits, the skbs fill the tx queue, and
 * eventually we stop the netdev queue. [iwm_tx_worker]
 * The tx queue is emptied as we get new tx credits, by scheduling the
 * tx_worker. [iwm_tx_credit_inc]
 * The netdev queue is started again when we have enough tx credits,
 * and when our tx queue has some reasonable amount of space available
 * (i.e. half of the max size). [iwm_tx_worker]
 */
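
/*
 * Sketch of a concatenated UDMA frame as assembled by iwm_tx_worker
 * and sent by iwm_tx_send_concat_packets (informal, for orientation
 * only):
 *
 *   +------------------+------------------+-----+------------------------+
 *   | hdr|802.11 frame | hdr|802.11 frame | ... | hdr (EOP)|802.11 frame |
 *   +------------------+------------------+-----+------------------------+
 *
 * Each chunk is one REPLY_TX command, hdr being a struct
 * iwm_umac_wifi_out_hdr, padded to a 16-byte boundary (see the
 * ALIGN(cmdlen, 16) in iwm_tx_worker); only the last chunk gets its
 * EOP bit set, via iwm_udma_wifi_hdr_set_eop().
 */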

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>

#include "iwm.h"
#include "debug.h"
#include "commands.h"
#include "hal.h"
#include "umac.h"
#include "bus.h"

#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff

#define BYTES_TO_PAGES(n)	 (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
				 (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))
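
/*
 * BYTES_TO_PAGES rounds a byte count up to a number of UMAC pages:
 * the shift truncates, the leading "1 +" rounds up, and the
 * subtracted term cancels the extra page when n is already page
 * aligned. For example, assuming an illustrative 512-byte
 * IWM_UMAC_PAGE_SIZE:
 *   BYTES_TO_PAGES(513) = 1 + 1 - 0 = 2 pages
 *   BYTES_TO_PAGES(512) = 1 + 1 - 1 = 1 page
 */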

#define pool_id_to_queue(id)	 (((id) < IWM_TX_CMD_QUEUE) ? (id) : (id) - 1)
#define queue_to_pool_id(q)	 (((q) < IWM_TX_CMD_QUEUE) ? (q) : (q) + 1)
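
/*
 * Credit pool ids and tx queue ids are almost identical, except that
 * the command queue owns a credit pool of its own: data queues below
 * IWM_TX_CMD_QUEUE map 1:1 to pool ids, while those above it are
 * shifted by one to skip the command pool slot. As a made-up example,
 * if IWM_TX_CMD_QUEUE were 4, queue 3 would use pool 3 while queue 4
 * would use pool 5.
 */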

/* requires the tx_credit lock to be held */
static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
{
	struct pool_entry *pool = &tx_credit->pools[id];
	struct spool_entry *spool = &tx_credit->spools[pool->sid];
	int spool_pages;

	/* number of pages that can be taken from the spool by this pool */
	spool_pages = spool->max_pages - spool->alloc_pages +
		      max(pool->min_pages - pool->alloc_pages, 0);

	return min(pool->max_pages - pool->alloc_pages, spool_pages);
}
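
/*
 * Worked example for the computation above, with made-up numbers:
 * pool { min_pages = 4, max_pages = 10, alloc_pages = 6 } and
 * spool { max_pages = 20, alloc_pages = 12 }. The pool is past its
 * reserved minimum, so it gets no reserve bonus:
 * spool_pages = 20 - 12 + max(4 - 6, 0) = 8, and the available
 * credit is min(10 - 6, 8) = 4 more pages.
 */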

static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
{
	u32 npages = BYTES_TO_PAGES(nb);

	if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
		return true;

	set_bit(id, &iwm->tx_credit.full_pools_map);

	IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
		   pool_id_to_queue(id),
		   iwm_tx_credit_get(&iwm->tx_credit, id));

	return false;
}

void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
{
	struct pool_entry *pool;
	struct spool_entry *spool;
	int freed_pages;
	int queue;

	BUG_ON(id >= IWM_MACS_OUT_GROUPS);

	pool = &iwm->tx_credit.pools[id];
	spool = &iwm->tx_credit.spools[pool->sid];

	freed_pages = total_freed_pages - pool->total_freed_pages;
	IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);

	if (!freed_pages) {
		IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
		return;
	} else if (freed_pages < 0) {
		/* the UMAC's cumulative freed-pages counter wrapped */
		freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;
	}

	if (pool->alloc_pages > pool->min_pages) {
		int spool_pages = pool->alloc_pages - pool->min_pages;
		spool_pages = min(spool_pages, freed_pages);
		spool->alloc_pages -= spool_pages;
	}

	pool->alloc_pages -= freed_pages;
	pool->total_freed_pages = total_freed_pages;

	IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
		   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
		   pool->total_freed_pages, pool->sid, spool->alloc_pages);

	if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
	    (pool->alloc_pages < pool->max_pages / 2)) {
		clear_bit(id, &iwm->tx_credit.full_pools_map);

		queue = pool_id_to_queue(id);

		IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
			   "credit: %d\n", queue,
			   iwm_tx_credit_get(&iwm->tx_credit, id));
		queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
	}
}
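
/*
 * Note on the wrap handling above: total_freed_pages is a cumulative
 * counter reported by the UMAC that wraps at IWM_UMAC_PAGE_ALLOC_WRAP
 * (0xffff). For example, if the last snapshot was 0xfffe and the new
 * report is 0x0003, the raw difference is -0xfffb; adding
 * IWM_UMAC_PAGE_ALLOC_WRAP + 1 (0x10000) recovers the 5 pages that
 * were actually freed.
 */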

static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
{
	struct pool_entry *pool;
	struct spool_entry *spool;
	int spool_pages;

	IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
		   alloc_pages, id);

	BUG_ON(id >= IWM_MACS_OUT_GROUPS);

	pool = &iwm->tx_credit.pools[id];
	spool = &iwm->tx_credit.spools[pool->sid];

	spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;

	if (pool->alloc_pages >= pool->min_pages)
		spool->alloc_pages += alloc_pages;
	else if (spool_pages > 0)
		spool->alloc_pages += spool_pages;

	pool->alloc_pages += alloc_pages;

	IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
		   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
		   pool->total_freed_pages, pool->sid, spool->alloc_pages);
}
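
/*
 * The spool is only charged for the pages that exceed the pool's
 * pre-reserved min_pages. A worked example with made-up numbers:
 * with min_pages = 4 and alloc_pages = 2, allocating 5 more pages
 * gives spool_pages = 2 + 5 - 4 = 3, so only 3 of the 5 pages are
 * drawn from the spool; the other 2 come out of the pool's reserve.
 */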

int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
{
	u32 npages = BYTES_TO_PAGES(nb);
	int ret = 0;

	spin_lock(&iwm->tx_credit.lock);

	if (!iwm_tx_credit_ok(iwm, id, nb)) {
		IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
		ret = -ENOSPC;
		goto out;
	}

	iwm_tx_credit_dec(iwm, id, npages);

 out:
	spin_unlock(&iwm->tx_credit.lock);
	return ret;
}

/*
 * Since we're on an SDIO or USB bus, we are not sharing memory with
 * the target for storing the frames to be transmitted. The host needs
 * to push them upstream. As a consequence, there needs to be a way for
 * the target to let us know whether it can actually take more TX
 * frames or not. This is what Tx credits are for.
 *
 * For each Tx HW queue, we have a Tx pool, and then we have one
 * unique super pool (spool), which is actually a global pool of
 * all the UMAC pages.
 * Each Tx pool has min_pages and max_pages fields, and an alloc_pages
 * field. alloc_pages tracks the number of pages currently allocated
 * from the tx pool.
 * Here are the rules to check if, given a tx frame, we have enough
 * tx credits for it:
 * 1) We translate the frame length into a number of UMAC pages.
 *    Let's call them n_pages.
 * 2) For the corresponding tx pool, we check if n_pages +
 *    pool->alloc_pages is higher than pool->min_pages. min_pages
 *    represents a set of pre-allocated pages on the tx pool. If
 *    that's the case, then we need to allocate the extra pages from
 *    the spool. We can do so until the spool reaches spool->max_pages.
 * 3) A tx pool may never have more than pool->max_pages allocated in
 *    total, so once we're over min_pages we can keep drawing pages
 *    from the spool, but only up to max_pages.
 *
 * When the tx code path needs to send a tx frame, it first checks
 * that it has enough tx credits, following those rules.
 * [iwm_tx_credit_get]
 * If it does, it then updates the pool and spool counters and then
 * sends the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
 * On the other side, when the UMAC is done transmitting frames, it
 * sends a credit update notification to the host. This is when the
 * pool and spool counters get decreased. [iwm_tx_credit_inc, called
 * from rx.c:iwm_ntf_tx_credit_update]
 */
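
/*
 * A short worked example of those rules (all numbers made up):
 * a pool with min_pages = 4, max_pages = 10, alloc_pages = 3 and a
 * spool with max_pages = 20, alloc_pages = 12 receives a frame worth
 * n_pages = 3. Since 3 + 3 > min_pages, 2 of the 3 pages must come
 * from the spool (rule 2), which still has 20 - 12 = 8 pages left;
 * the pool ends up at alloc_pages = 6, under its max_pages cap of 10
 * (rule 3), so the frame gets its credits.
 */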
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
			      struct iwm_umac_notif_alive *alive)
{
	int i, sid, pool_pages;

	spin_lock(&iwm->tx_credit.lock);

	iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
	iwm->tx_credit.full_pools_map = 0;
	memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));

	IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);

	for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
		__le32 page_grp_state = alive->page_grp_state[i];

		iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
				UMAC_ALIVE_PAGE_STS_GRP_NUM);
		iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
				UMAC_ALIVE_PAGE_STS_SGRP_NUM);
		iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
				UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
		iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
				UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
		iwm->tx_credit.pools[i].alloc_pages = 0;
		iwm->tx_credit.pools[i].total_freed_pages = 0;

		sid = iwm->tx_credit.pools[i].sid;
		pool_pages = iwm->tx_credit.pools[i].min_pages;

		if (iwm->tx_credit.spools[sid].max_pages == 0) {
			iwm->tx_credit.spools[sid].id = sid;
			iwm->tx_credit.spools[sid].max_pages =
				GET_VAL32(page_grp_state,
					  UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
			iwm->tx_credit.spools[sid].alloc_pages = 0;
		}

		/* the pool's reserved min_pages are pre-charged to its spool */
		iwm->tx_credit.spools[sid].alloc_pages += pool_pages;

		IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
			   "min: %d, max: %d, pool alloc: %d, total_free: %d, "
			   "super pool alloc: %d\n",
			   i, iwm->tx_credit.pools[i].id,
			   iwm->tx_credit.pools[i].sid,
			   iwm->tx_credit.pools[i].min_pages,
			   iwm->tx_credit.pools[i].max_pages,
			   iwm->tx_credit.pools[i].alloc_pages,
			   iwm->tx_credit.pools[i].total_freed_pages,
			   iwm->tx_credit.spools[sid].alloc_pages);
	}

	spin_unlock(&iwm->tx_credit.lock);
}

#define IWM_UDMA_HDR_LEN	sizeof(struct iwm_umac_wifi_out_hdr)

static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
				  int pool_id, u8 *buf)
{
	struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
	struct iwm_udma_wifi_cmd udma_cmd;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_tx_info *tx_info = skb_to_tx_info(skb);

	udma_cmd.count = cpu_to_le16(skb->len +
				     sizeof(struct iwm_umac_fw_cmd_hdr));
	/* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
	 * called later to set EOP for the last packet. */
	udma_cmd.eop = 0;
	udma_cmd.credit_group = pool_id;
	udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
	udma_cmd.lmac_offset = 0;

	umac_cmd.id = REPLY_TX;
	umac_cmd.count = cpu_to_le16(skb->len);
	umac_cmd.color = tx_info->color;
	umac_cmd.resp = 0;
	umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));

	iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
	iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);

	memcpy(buf + sizeof(*hdr), skb->data, skb->len);

	return umac_cmd.seq_num;
}
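
/*
 * The buffer filled by iwm_tx_build_packet thus contains a struct
 * iwm_umac_wifi_out_hdr immediately followed by the raw 802.11 frame
 * taken from the skb. The returned UMAC sequence number is stored by
 * the caller in tid_info->last_seq_num (see iwm_tx_worker), presumably
 * so that the stop/resume logic in rx.c can refer to the last frame
 * sent for that RAxTID.
 */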

static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
				      struct iwm_tx_queue *txq)
{
	int ret;

	if (!txq->concat_count)
		return 0;

	IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
		   txq->id, txq->concat_count);

	/* mark EOP for the last packet */
	iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);

	trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
	ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);

	txq->concat_count = 0;
	txq->concat_ptr = txq->concat_buf;

	return ret;
}

void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	struct iwm_tid_info *tid_info;
	int cmdlen, ret, pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		spin_lock_bh(&txq->lock);
		skb = skb_dequeue(&txq->queue);
		spin_unlock_bh(&txq->lock);

		tx_info = skb_to_tx_info(skb);
		sta_info = &iwm->sta_table[tx_info->sta];
		if (!sta_info->valid) {
			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
			kfree_skb(skb);
			continue;
		}

		tid_info = &sta_info->tid_info[tx_info->tid];

		mutex_lock(&tid_info->mutex);

		/*
		 * If the RAxTID is stopped, we queue the skb to the stopped
		 * queue.
		 * When we get a UMAC notification to resume the tx flow for
		 * this RAxTID, we merge the stopped queue back into the
		 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
		 */
		if (tid_info->stopped) {
			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
				   tx_info->sta, tx_info->tid);
			spin_lock_bh(&txq->lock);
			skb_queue_tail(&txq->stopped_queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			continue;
		}

		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

		/* flush the concat buffer first if this frame won't fit */
		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			spin_lock_bh(&txq->lock);
			skb_queue_head(&txq->queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			break;
		}

		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		tid_info->last_seq_num =
			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		/* each concatenated REPLY_TX command is 16-byte aligned */
		txq->concat_count += ALIGN(cmdlen, 16);

		mutex_unlock(&tid_info->mutex);

		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]\n",
			   txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}

int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iwm_priv *iwm = ndev_to_iwm(netdev);
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	struct iwm_tx_info *tx_info;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	u8 *dst_addr, sta_id;
	u16 queue;
	int ret;

	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
			   "not associated\n");
		netif_tx_stop_all_queues(netdev);
		goto drop;
	}

	queue = skb_get_queue_mapping(skb);
	BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */

	txq = &iwm->txq[queue];

	/* No free space for Tx, tx_worker is too slow */
	if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
	    (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
		netif_stop_subqueue(netdev, queue);
		return NETDEV_TX_BUSY;
	}

	ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
				       iwm->bssid, 0);
	if (ret) {
		IWM_ERR(iwm, "build wifi header failed\n");
		goto drop;
	}

	dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

	for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
		sta_info = &iwm->sta_table[sta_id];
		if (sta_info->valid &&
		    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
			break;
	}

	if (sta_id == IWM_STA_TABLE_NUM) {
		IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
			dst_addr);
		goto drop;
	}

	tx_info = skb_to_tx_info(skb);
	tx_info->sta = sta_id;
	tx_info->color = sta_info->color;
	/* UMAC uses TID 8 (vs. 0) for non QoS packets */
	if (sta_info->qos)
		tx_info->tid = skb->priority;
	else
		tx_info->tid = IWM_UMAC_MGMT_TID;

	/* account the bytes before queueing: the worker may free the skb */
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	spin_lock_bh(&iwm->txq[queue].lock);
	skb_queue_tail(&iwm->txq[queue].queue, skb);
	spin_unlock_bh(&iwm->txq[queue].lock);

	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

	return NETDEV_TX_OK;

 drop:
	netdev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}