/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/firmware.h>

#include "mt76x2u.h"
#include "mt76x2_eeprom.h"

#define MT_CMD_HDR_LEN			4
#define MT_INBAND_PACKET_MAX_LEN	192
#define MT_MCU_MEMMAP_WLAN		0x410000

#define MCU_FW_URB_MAX_PAYLOAD		0x3900
#define MCU_ROM_PATCH_MAX_PAYLOAD	2048

#define MT76U_MCU_ILM_OFFSET		0x80000
#define MT76U_MCU_DLM_OFFSET		0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET	0x90000
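/* Send a function id/value pair to the MCU (CMD_FUN_SET_OP); a response
 * is only waited for when the function is not Q_SELECT.
 */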
static int
mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
			    u32 val)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
				  func != Q_SELECT);
}
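/* Turn the radio on or off through a CMD_POWER_SAVING_OP message
 * (RADIO_ON/RADIO_OFF, level 0) without waiting for a response.
 */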
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
{
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
				  false);
}
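/* Ask the MCU to load the CR table for the given type, temperature level
 * and channel; the cfg word combines NIC_CONF_0/NIC_CONF_1 EEPROM data
 * with BIT(31).
 */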
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
			u8 channel)
{
	struct {
		u8 cr_mode;
		u8 temp;
		u8 ch;
		u8 _pad0;
		__le32 cfg;
	} __packed __aligned(4) msg = {
		.cr_mode = type,
		.temp = temp_level,
		.ch = channel,
	};
	struct sk_buff *skb;
	u32 val;

	val = BIT(31);
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
	msg.cfg = cpu_to_le32(val);

	/* first set the channel without the extension channel info */
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
}
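/* Switch channel in two steps: the command is first sent without the
 * extension channel information and then repeated with ext_chan set to
 * 0xe0 + bw_index after a short delay.
 */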
int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
			    u8 bw_index, bool scan)
{
	struct {
		u8 idx;
		u8 scan;
		u8 bw;
		u8 _pad0;

		__le16 chainmask;
		u8 ext_chan;
		u8 _pad1;

	} __packed __aligned(4) msg = {
		.idx = channel,
		.scan = scan,
		.bw = bw,
		.chainmask = cpu_to_le16(dev->chainmask),
	};
	struct sk_buff *skb;

	/* first set the channel without the extension channel info */
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;

	mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);

	usleep_range(5000, 10000);

	msg.ext_chan = 0xe0 + bw_index;
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;

	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
}
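/* Run the MCU calibration identified by @type with parameter @val
 * (CMD_CALIBRATION_OP).
 */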
int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
			  u32 val)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(val),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
}
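/* Program the initial gain for @channel; BIT(31) in the channel word
 * forces the update.
 */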
int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
			  bool force)
{
	struct {
		__le32 channel;
		__le32 gain_val;
	} __packed __aligned(4) msg = {
		.channel = cpu_to_le32(channel),
		.gain_val = cpu_to_le32(gain),
	};
	struct sk_buff *skb;

	if (force)
		msg.channel |= cpu_to_le32(BIT(31));

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
}
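/* Configure dynamic VGA tuning for @channel, passing the current RSSI
 * and false-CCA count; BIT(31) marks AP mode, BIT(30) an extension
 * channel.
 */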
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
				bool ext, int rssi, u32 false_cca)
{
	struct {
		__le32 channel;
		__le32 rssi_val;
		__le32 false_cca_val;
	} __packed __aligned(4) msg = {
		.rssi_val = cpu_to_le32(rssi),
		.false_cca_val = cpu_to_le32(false_cca),
	};
	struct sk_buff *skb;
	u32 val = channel;

	if (ap)
		val |= BIT(31);
	if (ext)
		val |= BIT(30);
	msg.channel = cpu_to_le32(val);

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
}
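/* Forward TSSI compensation data to the MCU as a MCU_CAL_TSSI_COMP
 * calibration command.
 */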
int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
			  struct mt76x2_tssi_comp *tssi_data)
{
	struct {
		__le32 id;
		struct mt76x2_tssi_comp data;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
		.data = *tssi_data,
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
}
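/* Trigger execution of the downloaded firmware (IVB) through an empty
 * MT_VEND_DEV_MODE vendor request.
 */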
static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x12, 0, NULL, 0);
}
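/* Send the fixed command blob that enables the downloaded ROM patch
 * (class-type MT_VEND_DEV_MODE request).
 */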
static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
{
	struct mt76_usb *usb = &dev->mt76.usb;
	const u8 data[] = {
		0x6f, 0xfc, 0x08, 0x01,
		0x20, 0x04, 0x00, 0x00,
		0x00, 0x09, 0x00,
	};

	memcpy(usb->data, data, sizeof(data));
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_CLASS,
			     0x12, 0, usb->data, sizeof(data));
}
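/* Send the fixed WMT reset command blob, issued right after the ROM
 * patch has been enabled.
 */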
static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
{
	struct mt76_usb *usb = &dev->mt76.usb;
	u8 data[] = {
		0x6f, 0xfc, 0x05, 0x01,
		0x07, 0x01, 0x00, 0x04
	};

	memcpy(usb->data, data, sizeof(data));
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_CLASS,
			     0x12, 0, usb->data, sizeof(data));
}
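/* Download and activate the MT7662U ROM patch: take the hardware
 * semaphore (unless ROM protection is absent, i.e. MT7612), skip the
 * download if the patch is already applied, configure USB DMA and the
 * FCE, upload the patch in MCU_ROM_PATCH_MAX_PAYLOAD chunks and wait
 * for the patch status bit.
 */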
static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
{
	bool rom_protect = !is_mt7612(dev);
	struct mt76x2_patch_header *hdr;
	u32 val, patch_mask, patch_reg;
	const struct firmware *fw;
	int err;

	if (rom_protect &&
	    !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
		dev_err(dev->mt76.dev,
			"could not get hardware semaphore for ROM PATCH\n");
		return -ETIMEDOUT;
	}

	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
		patch_mask = BIT(0);
		patch_reg = MT_MCU_CLOCK_CTL;
	} else {
		patch_mask = BIT(1);
		patch_reg = MT_MCU_COM_REG0;
	}

	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
		dev_info(dev->mt76.dev, "ROM patch already applied\n");
		return 0;
	}

	err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
	if (err < 0)
		return err;

	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
		dev_err(dev->mt76.dev, "failed to load firmware\n");
		err = -EIO;
		goto out;
	}

	hdr = (struct mt76x2_patch_header *)fw->data;
	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);

	/* enable USB_DMA_CFG */
	val = MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);

	/* vendor reset */
	mt76u_mcu_fw_reset(&dev->mt76);
	usleep_range(5000, 10000);

	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);

	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
				     fw->size - sizeof(*hdr),
				     MCU_ROM_PATCH_MAX_PAYLOAD,
				     MT76U_MCU_ROM_PATCH_OFFSET);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	mt76x2u_mcu_enable_patch(dev);
	mt76x2u_mcu_reset_wmt(dev);
	mdelay(20);

	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
		dev_err(dev->mt76.dev, "failed to load ROM patch\n");
		err = -ETIMEDOUT;
	}

out:
	if (rom_protect)
		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
	release_firmware(fw);
	return err;
}
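/* Download the main MT7662U firmware: validate the image header, reset
 * the MCU, configure USB DMA and the FCE, upload the ILM and DLM
 * sections (the DLM offset moves up by 0x800 from revision E3 on) and
 * wait for the firmware to report that it is running.
 */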
static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
{
	u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
	const struct mt76x2_fw_header *hdr;
	int err, len, ilm_len, dlm_len;
	const struct firmware *fw;

	err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
	if (err < 0)
		return err;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		err = -EINVAL;
		goto out;
	}

	hdr = (const struct mt76x2_fw_header *)fw->data;
	ilm_len = le32_to_cpu(hdr->ilm_len);
	dlm_len = le32_to_cpu(hdr->dlm_len);
	len = sizeof(*hdr) + ilm_len + dlm_len;
	if (fw->size != len) {
		err = -EINVAL;
		goto out;
	}

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

	val = le16_to_cpu(hdr->build_ver);
	dev_info(dev->mt76.dev, "Build: %x\n", val);
	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);

	/* vendor reset */
	mt76u_mcu_fw_reset(&dev->mt76);
	usleep_range(5000, 10000);

	/* enable USB_DMA_CFG */
	val = MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);

	/* load ILM */
	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
				     ilm_len, MCU_FW_URB_MAX_PAYLOAD,
				     MT76U_MCU_ILM_OFFSET);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	/* load DLM */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		dlm_offset += 0x800;
	err = mt76u_mcu_fw_send_data(&dev->mt76,
				     fw->data + sizeof(*hdr) + ilm_len,
				     dlm_len, MCU_FW_URB_MAX_PAYLOAD,
				     dlm_offset);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	mt76x2u_mcu_load_ivb(dev);
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
		dev_err(dev->mt76.dev, "firmware failed to start\n");
		err = -ETIMEDOUT;
		goto out;
	}

	mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	dev_dbg(dev->mt76.dev, "firmware running\n");

out:
	release_firmware(fw);
	return err;
}
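/* Load the ROM patch first, then the main firmware. */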
int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
{
	int err;

	err = mt76x2u_mcu_load_rom_patch(dev);
	if (err < 0)
		return err;

	return mt76x2u_mcu_load_firmware(dev);
}
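/* Post-boot MCU setup: select the operating queue and turn the radio on. */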
int mt76x2u_mcu_init(struct mt76x2_dev *dev)
{
	int err;

	err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
	if (err < 0)
		return err;

	return mt76x2u_mcu_set_radio_state(dev, true);
}
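/* Tear down MCU communication: kill the pending MCU response URB and
 * free its buffer.
 */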
void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
{
	struct mt76_usb *usb = &dev->mt76.usb;

	usb_kill_urb(usb->mcu.res.urb);
	mt76u_buf_free(&usb->mcu.res);
}
464