1 /*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/delay.h>
18
19 #include "mt76x2u.h"
20 #include "mt76x2_eeprom.h"
21
mt76x2u_init_dma(struct mt76x2_dev * dev)22 static void mt76x2u_init_dma(struct mt76x2_dev *dev)
23 {
24 u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
25
26 val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
27 MT_USB_DMA_CFG_RX_BULK_EN |
28 MT_USB_DMA_CFG_TX_BULK_EN;
29
30 /* disable AGGR_BULK_RX in order to receive one
31 * frame in each rx urb and avoid copies
32 */
33 val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
34 mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
35 }
36
/* RF power-on patch sequence.
 *
 * NOTE(review): these CFG-space offsets (0x130, 0x1c, 0x14, 0x14c) and
 * values are magic numbers, presumably taken from the vendor reference
 * driver; the register ordering and the delays between steps must be
 * preserved as-is.
 */
static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
{
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
	udelay(1);

	/* replace the low byte of 0x1c with 0x30 */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);

	mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
	udelay(1);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
	usleep_range(150, 200);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
	usleep_range(50, 100);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
}
56
/* Power on one RF chain.
 *
 * @unit: RF chain index (0 or 1); the per-chain control bits in CFG
 *        register 0x130 are laid out at an 8-bit stride, hence the shift.
 *
 * The enable steps below are ordered and separated by settle delays;
 * do not reorder them.
 */
static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
{
	int shift = unit ? 8 : 0;
	u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;

	/* Enable RF BG */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
	usleep_range(10, 20);

	/* Enable RFDIG LDO/AFE/ABB/ADDA */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
	usleep_range(10, 20);

	/* Switch RFDIG power to internal LDO */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
	usleep_range(10, 20);

	mt76x2u_power_on_rf_patch(dev);

	/* NOTE(review): 0x530 is a raw register offset (no MT_VEND_ADDR
	 * wrapper, unlike every other access here) — confirm intentional.
	 */
	mt76_set(dev, 0x530, 0xf);
}
78
/* Full chip power-up: raise the WLAN MTCMOS power domain, release
 * resets, enable the WLAN function and then power on both RF chains.
 * The register sequence is order-sensitive.
 */
static void mt76x2u_power_on(struct mt76x2_dev *dev)
{
	u32 val;

	/* Turn on WL MTCMOS */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
		 MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);

	val = MT_WLAN_MTC_CTRL_STATE_UP |
	      MT_WLAN_MTC_CTRL_PWR_ACK |
	      MT_WLAN_MTC_CTRL_PWR_ACK_S;

	/* NOTE(review): poll result is ignored — a timeout here falls
	 * through and continues the power-up sequence anyway.
	 */
	mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
	usleep_range(10, 20);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	usleep_range(10, 20);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);

	/* Turn on AD/DA power down */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));

	/* WLAN function enable */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));

	/* Release BBP software reset */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));

	mt76x2u_power_on_rf(dev, 0);
	mt76x2u_power_on_rf(dev, 1);
}
114
mt76x2u_init_eeprom(struct mt76x2_dev * dev)115 static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
116 {
117 u32 val, i;
118
119 dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
120 MT7612U_EEPROM_SIZE,
121 GFP_KERNEL);
122 dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
123 if (!dev->mt76.eeprom.data)
124 return -ENOMEM;
125
126 for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
127 val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
128 put_unaligned_le32(val, dev->mt76.eeprom.data + i);
129 }
130
131 mt76x2_eeprom_parse_hw_cap(dev);
132 return 0;
133 }
134
mt76x2u_alloc_device(struct device * pdev)135 struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
136 {
137 static const struct mt76_driver_ops drv_ops = {
138 .tx_prepare_skb = mt76x2u_tx_prepare_skb,
139 .tx_complete_skb = mt76x2u_tx_complete_skb,
140 .tx_status_data = mt76x2u_tx_status_data,
141 .rx_skb = mt76x2_queue_rx_skb,
142 };
143 struct mt76x2_dev *dev;
144 struct mt76_dev *mdev;
145
146 mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
147 if (!mdev)
148 return NULL;
149
150 dev = container_of(mdev, struct mt76x2_dev, mt76);
151 mdev->dev = pdev;
152 mdev->drv = &drv_ops;
153
154 mutex_init(&dev->mutex);
155
156 return dev;
157 }
158
mt76x2u_init_beacon_offsets(struct mt76x2_dev * dev)159 static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
160 {
161 mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
162 mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
163 mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
164 mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
165 }
166
/* Bring the hardware from power-off to a fully initialized (but
 * stopped) state: power up, load firmware, init DMA and MCU, reset the
 * MAC, and clear the WCID/key tables.
 *
 * Returns 0 on success or a negative errno (-ETIMEDOUT when the
 * MAC/BBP never becomes ready, -EIO on stuck WPDMA, or an error from
 * the MCU/MAC helpers). The step ordering below is required by the
 * hardware; do not reorder.
 */
int mt76x2u_init_hardware(struct mt76x2_dev *dev)
{
	static const u16 beacon_offsets[] = {
		/* 512 byte per beacon */
		0xc000, 0xc200, 0xc400, 0xc600,
		0xc800, 0xca00, 0xcc00, 0xce00,
		0xd000, 0xd200, 0xd400, 0xd600,
		0xd800, 0xda00, 0xdc00, 0xde00
	};
	/* broadcast address entry used to wipe every WCID slot */
	const struct mt76_wcid_addr addr = {
		.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
		.ba_mask = 0,
	};
	int i, err;

	dev->beacon_offsets = beacon_offsets;

	mt76x2_reset_wlan(dev, true);
	mt76x2u_power_on(dev);

	if (!mt76x2_wait_for_mac(dev))
		return -ETIMEDOUT;

	err = mt76x2u_mcu_fw_init(dev);
	if (err < 0)
		return err;

	/* WPDMA must be idle before we continue */
	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
		return -EIO;

	/* wait for asic ready after fw load. */
	if (!mt76x2_wait_for_mac(dev))
		return -ETIMEDOUT;

	/* disable header translation and TSO */
	mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
	mt76_wr(dev, MT_TSO_CTRL, 0);

	mt76x2u_init_dma(dev);

	err = mt76x2u_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x2u_mac_reset(dev);
	if (err < 0)
		return err;

	mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);

	mt76x2u_init_beacon_offsets(dev);

	if (!mt76x2_wait_for_bbp(dev))
		return -ETIMEDOUT;

	/* reset wcid table */
	for (i = 0; i < 254; i++)
		mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
			     sizeof(struct mt76_wcid_addr));

	/* reset shared key table and pairwise key table */
	for (i = 0; i < 4; i++)
		mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
	for (i = 0; i < 256; i++)
		mt76_wr(dev, MT_WCID_ATTR(i), 1);

	/* beaconing stays off until a vif enables it */
	mt76_clear(dev, MT_BEACON_TIME_CFG,
		   MT_BEACON_TIME_CFG_TIMER_EN |
		   MT_BEACON_TIME_CFG_SYNC_MODE |
		   MT_BEACON_TIME_CFG_TBTT_EN |
		   MT_BEACON_TIME_CFG_BEACON_TX);

	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);

	err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
	if (err < 0)
		return err;

	mt76x2u_phy_set_rxpath(dev);
	mt76x2u_phy_set_txdac(dev);

	/* leave the MAC stopped; it is started on interface up */
	return mt76x2u_mac_stop(dev);
}
253
/* Probe-time registration: read the EEPROM, set up USB queues,
 * initialize the hardware and register with mac80211.
 *
 * Returns 0 on success or a negative errno; on failure after queue
 * allocation everything is torn down via mt76x2u_cleanup().
 */
int mt76x2u_register_device(struct mt76x2_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct wiphy *wiphy = hw->wiphy;
	int err;

	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
	mt76x2_init_device(dev);

	err = mt76x2u_init_eeprom(dev);
	if (err < 0)
		return err;

	err = mt76u_mcu_init_rx(&dev->mt76);
	if (err < 0)
		return err;

	err = mt76u_alloc_queues(&dev->mt76);
	if (err < 0)
		goto fail;

	err = mt76x2u_init_hardware(dev);
	if (err < 0)
		goto fail;

	/* station mode only */
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);

	err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
				   ARRAY_SIZE(mt76x2_rates));
	if (err)
		goto fail;

	/* check hw sg support in order to enable AMSDU */
	if (mt76u_check_sg(&dev->mt76))
		hw->max_tx_fragments = MT_SG_MAX_SIZE;
	else
		hw->max_tx_fragments = 1;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	mt76x2_init_debugfs(dev);
	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);

	return 0;

fail:
	mt76x2u_cleanup(dev);
	return err;
}
304
/* Stop hardware activity: cancel the tx-status and calibration
 * workers before halting the MAC.
 */
void mt76x2u_stop_hw(struct mt76x2_dev *dev)
{
	mt76u_stop_stat_wk(&dev->mt76);
	cancel_delayed_work_sync(&dev->cal_work);
	mt76x2u_mac_stop(dev);
}
311
/* Full teardown (disconnect/probe-failure path): radio off, stop the
 * hardware, release USB queues and shut down the MCU — in that order.
 */
void mt76x2u_cleanup(struct mt76x2_dev *dev)
{
	mt76x2u_mcu_set_radio_state(dev, false);
	mt76x2u_stop_hw(dev);
	mt76u_queues_deinit(&dev->mt76);
	mt76x2u_mcu_deinit(dev);
}
319