• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc.
3  *
4  * Author: Ryder Lee <ryder.lee@mediatek.com>
5  *         Roy Luo <royluo@google.com>
6  *         Felix Fietkau <nbd@nbd.name>
7  *         Lorenzo Bianconi <lorenzo@kernel.org>
8  */
9 
10 #include <linux/etherdevice.h>
11 #include <linux/timekeeping.h>
12 
13 #include "mt7615.h"
14 #include "../dma.h"
15 #include "mac.h"
16 
/* TX completion hook called by the mt76 DMA layer for every reaped
 * queue entry.  Resolves the skb that was hidden behind a TXWI token
 * (error/free path) and hands it back to mac80211 via
 * mt76_tx_complete_skb().
 */
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	/* Entries without a TXWI carry a bare skb (e.g. MCU frames);
	 * nothing to look up, just free it.
	 */
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7615_dev *dev;
		struct mt7615_txp_common *txp;
		u16 token;

		dev = container_of(mdev, struct mt7615_dev, mt76);
		txp = mt7615_txwi_to_txp(mdev, e->txwi);

		/* The token lives in different TXP layouts: the FW TXP
		 * (mt7615) stores it directly, the HW TXP (mt7663 etc.)
		 * encodes it in msdu_id[0] with a valid bit set.
		 */
		if (is_mt7615(&dev->mt76))
			token = le16_to_cpu(txp->fw.token);
		else
			token = le16_to_cpu(txp->hw.msdu_id[0]) &
				~MT_MSDU_ID_VALID;

		/* Reclaim the txwi cache entry registered at prepare
		 * time; the real skb to complete is stashed in it.
		 */
		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, token);
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
49 
50 static void
mt7615_write_hw_txp(struct mt7615_dev * dev,struct mt76_tx_info * tx_info,void * txp_ptr,u32 id)51 mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
52 		    void *txp_ptr, u32 id)
53 {
54 	struct mt7615_hw_txp *txp = txp_ptr;
55 	struct mt7615_txp_ptr *ptr = &txp->ptr[0];
56 	int i, nbuf = tx_info->nbuf - 1;
57 	u32 last_mask;
58 
59 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
60 	tx_info->nbuf = 1;
61 
62 	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
63 
64 	if (is_mt7663(&dev->mt76))
65 		last_mask = MT_TXD_LEN_LAST;
66 	else
67 		last_mask = MT_TXD_LEN_AMSDU_LAST |
68 			    MT_TXD_LEN_MSDU_LAST;
69 
70 	for (i = 0; i < nbuf; i++) {
71 		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
72 		u32 addr = tx_info->buf[i + 1].addr;
73 
74 		if (i == nbuf - 1)
75 			len |= last_mask;
76 
77 		if (i & 1) {
78 			ptr->buf1 = cpu_to_le32(addr);
79 			ptr->len1 = cpu_to_le16(len);
80 			ptr++;
81 		} else {
82 			ptr->buf0 = cpu_to_le32(addr);
83 			ptr->len0 = cpu_to_le16(len);
84 		}
85 	}
86 }
87 
88 static void
mt7615_write_fw_txp(struct mt7615_dev * dev,struct mt76_tx_info * tx_info,void * txp_ptr,u32 id)89 mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
90 		    void *txp_ptr, u32 id)
91 {
92 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
93 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
94 	struct ieee80211_key_conf *key = info->control.hw_key;
95 	struct ieee80211_vif *vif = info->control.vif;
96 	struct mt7615_fw_txp *txp = txp_ptr;
97 	int nbuf = tx_info->nbuf - 1;
98 	int i;
99 
100 	for (i = 0; i < nbuf; i++) {
101 		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
102 		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
103 	}
104 	txp->nbuf = nbuf;
105 
106 	/* pass partial skb header to fw */
107 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
108 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
109 	tx_info->buf[1].skip_unmap = true;
110 	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
111 
112 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
113 
114 	if (!key)
115 		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
116 
117 	if (ieee80211_is_mgmt(hdr->frame_control))
118 		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
119 
120 	if (vif) {
121 		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
122 
123 		txp->bss_idx = mvif->idx;
124 	}
125 
126 	txp->token = cpu_to_le16(id);
127 	txp->rept_wds_wcid = 0xff;
128 }
129 
/* mt76 driver hook: prepare an skb for DMA transmission.  Writes the
 * TXWI and the chip-specific TXP behind it, allocates a completion
 * token, and replaces tx_info->skb with DMA_DUMMY_DATA so the DMA
 * layer does not free the real skb (it is completed via the token in
 * mt7615_tx_complete_skb()).
 *
 * Returns 0 on success or a negative errno (token allocation failure).
 */
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid, id;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_sta *msta;
	void *txp;

	/* Fall back to the global wcid for frames without a station. */
	msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	/* Rate-control probe frames: push the probe rate to the HW rate
	 * tables for this station, on whichever phy the frame targets.
	 */
	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
		struct mt7615_phy *phy = &dev->phy;

		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
			phy = mdev->phy2->priv;

		spin_lock_bh(&dev->mt76.lock);
		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	/* The txwi cache entry lives right after the TXWI buffer; stash
	 * the skb there so the completion path can recover it by token.
	 */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key, false);

	/* TXP layout differs per chip: FW TXP on mt7615, HW TXP otherwise. */
	txp = txwi + MT_TXD_SIZE;
	memset(txp, 0, sizeof(struct mt7615_txp_common));
	if (is_mt7615(&dev->mt76))
		mt7615_write_fw_txp(dev, tx_info, txp, id);
	else
		mt7615_write_hw_txp(dev, tx_info, txp, id);

	/* Hide the real skb from the DMA layer; it is completed via the
	 * token in mt7615_tx_complete_skb().
	 */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
185