/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"

/*
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
			continue;
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
	}
}

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
				   num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* Starting from 22560, the HW expects bytes */
		WARN_ON(trans_pcie->bc_table_dword);
		WARN_ON(len > 0x3FFF);
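		/* gen3 entry: byte count in bits 0-13, fetch-chunk count in bits 14-15 */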
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	} else {
		/* Until 22560, the HW expects DW */
		WARN_ON(!trans_pcie->bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
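		/* legacy entry: dword count in bits 0-11, fetch-chunk count in bits 12-15 */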
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most; it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
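		/* build the 802.3-style subframe header: DA, SA, length
		 * (the SNAP header is appended below by tso_build_hdr())
		 */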
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
					  struct iwl_txq *txq,
					  struct iwl_device_cmd *dev_cmd,
					  struct sk_buff *skb,
					  struct iwl_cmd_meta *out_meta,
					  int hdr_len,
					  int tx_cmd_len)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
				      len + IWL_FIRST_TB_SIZE,
				      hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
				      struct sk_buff *skb,
				      struct iwl_tfh_tfd *tfd,
				      struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -ENOMEM;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb_frag_address(frag),
					skb_frag_size(frag));
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
				    struct iwl_txq *txq,
				    struct iwl_device_cmd *dev_cmd,
				    struct sk_buff *skb,
				    struct iwl_cmd_meta *out_meta,
				    int hdr_len,
				    int tx_cmd_len,
				    bool pad)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb->data + hdr_len,
					tb2_len);
	}

	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					frag->data,
					skb_headlen(frag));
		if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
			(*ieee80211_get_qos_ctl(hdr) &
			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						    out_meta, hdr_len, len);

	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				      hdr_len, len, !amsdu);
}

int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_queue_space(trans, txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;

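	/* both sizes start with the wide command header: copy_size tracks
	 * what is copied into the DMA-able command buffer, cmd_size the
	 * logical length of the full command
	 */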
	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));

	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   (u8 *)out_cmd + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
	kfree(txq);
}

/*
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BDs.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}

int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout)
{
	struct iwl_txq *txq;
	int ret;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     (trans->trans_cfg->device_family >=
				      IWL_DEVICE_FAMILY_22560) ?
				     sizeof(struct iwl_gen3_bc_tbl) :
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_pcie_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	*intxq = txq;
	return 0;

error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}

int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}

int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout)
{
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = flags,
		.sta_id = sta_id,
		.tid = tid,
	};
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(cmd) },
		.data = { &cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
	if (ret)
		return ret;

	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}

void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
			continue;

		iwl_pcie_gen2_txq_free(trans, i);
	}
}

int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans_pcie->txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans_pcie->txq[txq_id] = queue;
		ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans_pcie->txq[txq_id];
	}

	ret = iwl_pcie_txq_init(trans, queue, queue_size,
				(txq_id == trans_pcie->cmd_queue));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);

	return 0;

error:
	iwl_pcie_gen2_tx_free(trans);
	return ret;
}