/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
#include "mt76x2_trace.h"

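/*
 * Program the BSSID for the given hardware interface slot (0..7):
 * low 32 bits first, then the upper 16 bits of the address.
 */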
void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}

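/*
 * Drain pending TX status entries from the hardware. From the IRQ path
 * (irq == true) entries are buffered in txstatus_fifo for deferred
 * processing; otherwise they are reported immediately.
 */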
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
	struct mt76x2_tx_status stat = {};
	unsigned long flags;
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		spin_lock_irqsave(&dev->irq_lock, flags);
		ret = mt76x2_mac_load_tx_status(dev, &stat);
		spin_unlock_irqrestore(&dev->irq_lock, flags);

		if (!ret)
			break;

		trace_mac_txstat_fetch(dev, &stat);

		if (!irq) {
			mt76x2_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}

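/*
 * Fill in the status-tracking fields of a completed frame from its TXWI
 * and hand the skb back for final TX status reporting.
 */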
static void
mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
			void *txwi_ptr)
{
	struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
	struct mt76x2_txwi *txwi = txwi_ptr;

	mt76x2_mac_poll_tx_status(dev, false);

	txi->tries = 0;
	txi->jiffies = jiffies;
	txi->wcid = txwi->wcid;
	txi->pktid = txwi->pktid;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
	mt76x2_tx_complete(dev, skb);
}

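/*
 * Report all TX status entries that the IRQ path queued up in
 * mt76x2_mac_poll_tx_status().
 */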
void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
{
	struct mt76x2_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x2_send_tx_status(dev, &stat, &update);
}

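/*
 * Completion hook for transmitted frames: frames that carried a TXWI go
 * through full TX status handling, anything else is simply freed.
 */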
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			    struct mt76_queue_entry *e, bool flush)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);

	if (e->txwi)
		mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
	else
		dev_kfree_skb_any(e->skb);
}

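/*
 * Copy a beacon frame, prefixed by its TXWI, into beacon memory at the
 * given offset. Fails if TXWI plus frame do not fit into one slot.
 */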
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
	struct mt76x2_txwi txwi;

	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
		return -ENOSPC;

	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);

	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
	offset += sizeof(txwi);

	mt76_wr_copy(dev, offset, skb->data, skb->len);
	return 0;
}

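/*
 * Write (or clear, if skb is NULL) the beacon template for one hardware
 * beacon slot, keeping the slot masked out via MT_BCN_BYPASS_MASK while
 * it is being rewritten.
 */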
static int
__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
	int beacon_addr = dev->beacon_offsets[bcn_idx];
	int ret = 0;
	int i;

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));

	if (skb) {
		ret = mt76_write_beacon(dev, beacon_addr, skb);
		if (!ret)
			dev->beacon_data_mask |= BIT(bcn_idx);
	} else {
		dev->beacon_data_mask &= ~BIT(bcn_idx);
		for (i = 0; i < beacon_len; i += 4)
			mt76_wr(dev, beacon_addr + i, 0);
	}

	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);

	return ret;
}

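/*
 * Update the beacon for one virtual interface and repack the hardware
 * slots so active beacons occupy consecutive indices; stale trailing
 * slots are cleared and the multi-beacon count is refreshed.
 */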
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
			  struct sk_buff *skb)
{
	bool force_update = false;
	int bcn_idx = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
		if (vif_idx == i) {
			force_update = !!dev->beacons[i] ^ !!skb;

			if (dev->beacons[i])
				dev_kfree_skb(dev->beacons[i]);

			dev->beacons[i] = skb;
			__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
		} else if (force_update && dev->beacons[i]) {
			__mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
		}

		bcn_idx += !!dev->beacons[i];
	}

	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
		if (!(dev->beacon_data_mask & BIT(i)))
			break;

		__mt76x2_mac_set_beacon(dev, i, NULL);
	}

	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
		       bcn_idx - 1);
	return 0;
}

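/*
 * Enable or disable beaconing for a virtual interface. Beacon timers
 * and (pre-)TBTT interrupts are only reconfigured when the overall
 * beacon mask transitions between empty and non-empty.
 */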
void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
{
	u8 old_mask = dev->beacon_mask;
	bool en;
	u32 reg;

	if (val) {
		dev->beacon_mask |= BIT(vif_idx);
	} else {
		dev->beacon_mask &= ~BIT(vif_idx);
		mt76x2_mac_set_beacon(dev, vif_idx, NULL);
	}

	if (!!old_mask == !!dev->beacon_mask)
		return;

	en = dev->beacon_mask;

	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	reg = MT_BEACON_TIME_CFG_BEACON_TX |
	      MT_BEACON_TIME_CFG_TBTT_EN |
	      MT_BEACON_TIME_CFG_TIMER_EN;
	mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);

	if (en)
		mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

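/* Accumulate channel busy/active time from the MAC survey counters */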
void mt76x2_update_channel(struct mt76_dev *mdev)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
	struct mt76_channel_state *state;
	u32 active, busy;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);

	busy = mt76_rr(dev, MT_CH_BUSY);
	active = busy + mt76_rr(dev, MT_CH_IDLE);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_busy += busy;
	state->cc_active += active;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

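/*
 * Periodic MAC housekeeping: update the channel survey state and
 * accumulate the TX aggregation counters (two 16-bit counters per
 * register), then re-arm the work.
 */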
void mt76x2_mac_work(struct work_struct *work)
{
	struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
					      mac_work.work);
	int i, idx;

	mt76x2_update_channel(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->aggr_stats[idx++] += val & 0xffff;
		dev->aggr_stats[idx++] += val >> 16;
	}

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     MT_CALIBRATE_INTERVAL);
}

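/*
 * Apply an RTS threshold: ~0 disables RTS-based protection; any other
 * value programs the threshold and enables protection in all per-mode
 * protection registers.
 */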
void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_MM20_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_MM40_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_GF20_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_GF40_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG6,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG7,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG8,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}