// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

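/* Apply the MAC-layer part of a channel switch: TX subcarrier indices for
 * the secondary 20M/40M channels, the RF bandwidth mode, the MAC clock
 * selection, and the CCK check bit for 5 GHz channels.
 */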
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

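/* Pre-power-on MAC configuration: clear REG_RSV_CTRL, select LDO/SPS on
 * legacy (11n wcpu) chips, enable the BT digital clock on PCIe, set up the
 * PAPE/LNAON pin mux, and keep BB/RF disabled until later initialization.
 */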
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

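/* Poll an 8-bit register in 50 us steps for at most 50 * RTW_PWR_POLLING_CNT
 * microseconds until (value & mask) matches the target; returns true on
 * success.
 */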
static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

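/* Run one RTW_PWR_CMD_POLLING entry; on PCIe, toggle BIT_PFM_WOWL and poll
 * once more before giving up (8723D needs an extra clear/set around the
 * toggle).
 */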
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}

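/* Walk one power sequence command table until RTW_PWR_CMD_END, executing
 * only the entries that match the current interface and cut masks.
 */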
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

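/* Derive the interface mask (BIT(2) for PCIe, BIT(1) for USB) and the cut
 * mask for this device, then run every sub-sequence in the NULL-terminated
 * array of command tables.
 */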
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = BIT(2);
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = BIT(1);
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}

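/* Switch MAC power on or off by running the chip's power sequence. The
 * current state is inferred from REG_CR (reading 0xea means powered off)
 * and, for USB, from REG_SYS_STATUS1; -EALREADY is returned if the MAC is
 * already in the requested state.
 */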
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;
	int ret;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check FW still exist or not */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
	if (ret)
		return ret;

	return 0;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);
		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

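/* Sanity-check the firmware blob: its total length must equal the header
 * size plus the dmem/imem (and optional emem) segments declared in the
 * header, each followed by its checksum.
 */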
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

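/* Save the registers touched during firmware download (TXDMA queue map,
 * REG_CR, H2CQ CSR, page numbers, beacon control) into bckp[] and switch
 * them to the settings the download path needs; they are put back later by
 * download_firmware_reg_restore().
 */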
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

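/* Download one firmware segment: split it into 4KB chunks, push each chunk
 * into the TX buffer via a reserved page write, DDMA it to its OCP
 * destination, then verify the accumulated checksum.
 */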
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

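/* Full download flow for 11ac-generation chips: validate the image, save
 * the LTE-coex register, halt the WLAN CPU, back up and reprogram the DLFW
 * registers, push every segment, restore state, restart the CPU and wait
 * for the firmware-ready indication.
 */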
static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");
fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}

static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

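/* Legacy (11n wcpu) download path: skip the legacy header, write the image
 * page by page through the paged MCU firmware window, then wait for the
 * download-complete report bit.
 */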
static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}

static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

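/* Wait up to roughly 100 ms (5 x 20 ms) for one priority queue to drain,
 * i.e. until its available page count equals its reserved page count.
 */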
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				     rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				      rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

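/* Choose the RQPN table for the current bus (PCIe, or USB by bulk-out
 * endpoint count), program the TXDMA queue-to-priority mapping, and
 * re-enable MAC TX/RX.
 */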
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	return 0;
}

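/* Lay out the TX FIFO pages: reserve pages for the driver (and, on 11ac
 * chips, for H2C extra/static info, H2CQ, CPU instructions, FW TX buffer
 * and CSI buffer) at the top of the FIFO, and leave the pages below the
 * reserved boundary for the AC queues.
 */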
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				   RSVD_PG_H2C_EXTRAINFO_NUM +
				   RSVD_PG_H2C_STATICINFO_NUM +
				   RSVD_PG_H2CQ_NUM +
				   RSVD_PG_CPU_INSTRUCTION_NUM +
				   RSVD_PG_FW_TXBUF_NUM +
				   csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}

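/* Point the H2C queue head/read/tail registers at the reserved H2CQ pages
 * and verify the queue starts out empty (free space equals its size).
 * Nothing to do on legacy 11n wcpu chips.
 */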
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}