• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2008 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "core.h"
18 #include "hw.h"
19 #include "reg.h"
20 #include "phy.h"
21 
/*
 * Program the software copies of the per-queue TX interrupt masks
 * into the secondary interrupt mask registers: TXOK/TXDESC go into
 * AR_IMR_S0, TXERR/TXEOL into AR_IMR_S1 and TXURN into AR_IMR_S2.
 */
static void ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
					struct ath9k_tx_queue_info *qi)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
		ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
		ahp->ah_txUrnInterruptMask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		  | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		  | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
	/* TXURN shares AR_IMR_S2 with other bits, so RMW only its field. */
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
42 
/* Read back the TX descriptor pointer register (AR_QTXDP) for queue q. */
u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
47 
/*
 * Load the TX descriptor pointer register for queue q with the
 * physical address of the first descriptor (txdp).
 * Always succeeds; the bool return exists for API symmetry.
 */
bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);

	return true;
}
54 
/*
 * Enable transmission on queue q by setting its bit in the
 * TX-enable register (AR_Q_TXE). Always returns true.
 */
bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
{
	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);

	REG_WRITE(ah, AR_Q_TXE, 1 << q);

	return true;
}
63 
/*
 * Return the number of frames still pending on TX queue q.
 *
 * The pending-frame count comes from AR_QSTS; a zero count can still
 * hide an in-flight frame, so if the queue's TX-enable bit is set we
 * report at least one pending frame.
 */
u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
{
	u32 pending = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;

	if (pending == 0 && (REG_READ(ah, AR_Q_TXE) & (1 << q)))
		pending = 1;

	return pending;
}
77 
/*
 * Bump the TX FIFO trigger level in AR_TXCFG up or down by one step.
 *
 * Interrupts are globally masked around the read-modify-write so the
 * interrupt path cannot race the AR_TXCFG update.
 *
 * Returns true if the level actually changed.  NOTE(review): the early
 * return also rejects *decrease* requests once ah_txTrigLevel has
 * reached MAX_TX_FIFO_THRESHOLD — confirm this is intended.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
		return false;

	/* Mask all interrupts while AR_TXCFG is being modified. */
	omask = ath9k_hw_set_interrupts(ah, ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	/* Restore the previous interrupt mask. */
	ath9k_hw_set_interrupts(ah, omask);

	ah->ah_txTrigLevel = newLevel;

	return newLevel != curLevel;
}
107 
/*
 * Stop TX DMA on queue q.
 *
 * First requests a normal stop via AR_Q_TXD and polls up to 100 ms
 * (1000 x 100 us) for the queue to drain.  If frames are still
 * pending, falls back to a forced stop: schedule a hardware "quiet"
 * period to silence the air, force the channel-idle diagnostic bit,
 * and poll again for the kill to take effect.
 *
 * Returns true if the queue drained before the final poll timed out.
 */
bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
{
	u32 tsfLow, j, wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	/* Poll up to 100 ms for a graceful stop. */
	for (wait = 1000; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(100);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		/*
		 * Schedule a quiet period starting "now"; retry once if
		 * the TSF rolls past the programmed start time before
		 * the timer is armed.
		 */
		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				       AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"TSF have moved while trying to set "
				"quiet time TSF: 0x%08x\n", tsfLow);
		}

		/* Force the MAC to see an idle channel so the kill works. */
		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		/* Poll another 100 ms for the forced stop to complete. */
		wait = 1000;

		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
					"Failed to stop Tx DMA in 100 "
					"msec after killing last frame\n");
				break;
			}
			udelay(100);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	/* Clear the stop request for all queues. */
	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;
}
166 
/*
 * Fill in the scatter/gather fields of a TX descriptor.
 *
 * For the first segment only ds_ctl1 is OR-ed (ds_ctl0 was already set
 * up by ath9k_hw_set11n_txdesc); middle/last segments get their control
 * words written outright, with the last segment inheriting ds_ctl2/3
 * (tries/rates) from the first descriptor ds0.  AR_TxMore marks that
 * more segments follow.  All TX status words are cleared so stale
 * hardware status is not misread.  Always returns true.
 */
bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
			 u32 segLen, bool firstSeg,
			 bool lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		/* Middle segment of a multi-segment frame. */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;

	return true;
}
194 
ath9k_hw_cleartxdesc(struct ath_hal * ah,struct ath_desc * ds)195 void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
196 {
197 	struct ar5416_desc *ads = AR5416DESC(ds);
198 
199 	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
200 	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
201 	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
202 	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
203 	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
204 }
205 
/*
 * Decode the hardware TX completion status of a descriptor into
 * ds->ds_txstat.
 *
 * Returns -EINPROGRESS if the hardware has not yet set AR_TxDone,
 * otherwise 0 with the status structure filled in.  On FIFO/data/delim
 * underruns the TX trigger level is bumped up to reduce recurrence.
 */
int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	/* Hardware has not finished with this descriptor yet. */
	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	/* Map hardware error bits to ATH9K_TXERR_* status flags. */
	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		/* Raise trigger level to avoid repeated underruns. */
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	/* Block-ack received: record the BA bitmap halves. */
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	/* Which of the four rate series the frame finally went out on. */
	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	/* Per-chain RSSI, EVM and retry counters reported by hardware. */
	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 1;

	return 0;
}
280 
/*
 * Initialize the first TX control words (ds_ctl0/1/6) of a descriptor
 * for an 802.11n frame: frame length, TX power (offset-adjusted and
 * clamped to 63), key index/type and the ATH9K_TXDESC_* flag bits.
 * On AR9285 the extra control words ds_ctl8..11 are zeroed.
 */
void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* Apply calibration offset; hardware field tops out at 63. */
	txPower += ahp->ah_txPowerIndexOffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	/* AR9285 descriptors carry four additional control words. */
	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}
316 
/*
 * Program the multi-rate-retry scenario into a TX descriptor chain:
 * per-series try counts (ds_ctl2), rate codes (ds_ctl3), packet/RTS-CTS
 * durations (ds_ctl4/5) and rate flags plus the RTS/CTS rate (ds_ctl7).
 * RTS and CTS protection are mutually exclusive, so enabling one clears
 * the other.  The try/rate words are mirrored into the last descriptor
 * of the frame.  nseries and rtsctsDuration are accepted but unused.
 */
void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	(void) nseries;
	(void) rtsctsDuration;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		/* RTS and CTS-to-self are mutually exclusive. */
		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		/* No protection requested: clear both enables. */
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	/* Last descriptor must carry the same tries/rates words. */
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}
374 
/*
 * Mark a descriptor as the first subframe of an A-MPDU aggregate and
 * record the total aggregate length in the ds_ctl6 AR_AggrLen field.
 */
void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
				u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	u32 ctl6;

	ads->ds_ctl1 |= AR_IsAggr | AR_MoreAggr;

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_AggrLen;
	ctl6 |= SM(aggrLen, AR_AggrLen);
	ads->ds_ctl6 = ctl6;
}
384 
/*
 * Mark a descriptor as a middle subframe of an A-MPDU aggregate and
 * set the number of pad delimiters in the ds_ctl6 AR_PadDelim field.
 */
void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
				 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr | AR_MoreAggr;
	ads->ds_ctl6 = (ads->ds_ctl6 & ~AR_PadDelim) |
		       SM(numDelims, AR_PadDelim);
}
398 
ath9k_hw_set11n_aggr_last(struct ath_hal * ah,struct ath_desc * ds)399 void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
400 {
401 	struct ar5416_desc *ads = AR5416DESC(ds);
402 
403 	ads->ds_ctl1 |= AR_IsAggr;
404 	ads->ds_ctl1 &= ~AR_MoreAggr;
405 	ads->ds_ctl6 &= ~AR_PadDelim;
406 }
407 
ath9k_hw_clr11n_aggr(struct ath_hal * ah,struct ath_desc * ds)408 void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
409 {
410 	struct ar5416_desc *ads = AR5416DESC(ds);
411 
412 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
413 }
414 
/* Replace the burst duration field (AR_BurstDur) in ds_ctl2. */
void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
				   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 = (ads->ds_ctl2 & ~AR_BurstDur) |
		       SM(burstDuration, AR_BurstDur);
}
423 
/* Set or clear the virtual-more-fragments bit in ds_ctl0. */
void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
				     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	u32 ctl0 = ads->ds_ctl0;

	if (vmf)
		ctl0 |= AR_VirtMoreFrag;
	else
		ctl0 &= ~AR_VirtMoreFrag;

	ads->ds_ctl0 = ctl0;
}
434 
/*
 * Intersect the caller's queue mask (*txqs) with the set of queues
 * that raised a TX interrupt, then remove the returned queues from
 * the pending set so each interrupt is reported only once.
 */
void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	*txqs &= ahp->ah_intrTxqs;
	ahp->ah_intrTxqs &= ~(*txqs);
}
442 
/*
 * Copy caller-supplied TX queue parameters (qinfo) into the HAL's
 * software queue state for queue q, applying defaults, clamps and
 * power-of-two rounding:
 *  - aifs clamped to 255, or INIT_AIFS when ATH9K_TXQ_USEDEFAULT;
 *  - cwmin/cwmax clamped to 1024 and rounded up to (2^n - 1);
 *  - short/long retry clamped to 15, defaulted when zero.
 * The hardware is not touched here; ath9k_hw_resettxqueue() pushes the
 * settings to the chip.  Returns false for an invalid or inactive queue.
 */
bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %p\n", qi);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		/* Round up to the next (2^n - 1) contention window. */
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		/* Round up to the next (2^n - 1) contention window. */
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		/* U-APSD data queues disable the lockout backoff. */
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
511 
ath9k_hw_get_txq_props(struct ath_hal * ah,int q,struct ath9k_tx_queue_info * qinfo)512 bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
513 			    struct ath9k_tx_queue_info *qinfo)
514 {
515 	struct ath_hal_5416 *ahp = AH5416(ah);
516 	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
517 	struct ath9k_tx_queue_info *qi;
518 
519 	if (q >= pCap->total_queues) {
520 		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
521 		return false;
522 	}
523 
524 	qi = &ahp->ah_txq[q];
525 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
526 		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
527 		return false;
528 	}
529 
530 	qinfo->tqi_qflags = qi->tqi_qflags;
531 	qinfo->tqi_ver = qi->tqi_ver;
532 	qinfo->tqi_subtype = qi->tqi_subtype;
533 	qinfo->tqi_qflags = qi->tqi_qflags;
534 	qinfo->tqi_priority = qi->tqi_priority;
535 	qinfo->tqi_aifs = qi->tqi_aifs;
536 	qinfo->tqi_cwmin = qi->tqi_cwmin;
537 	qinfo->tqi_cwmax = qi->tqi_cwmax;
538 	qinfo->tqi_shretry = qi->tqi_shretry;
539 	qinfo->tqi_lgretry = qi->tqi_lgretry;
540 	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
541 	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
542 	qinfo->tqi_burstTime = qi->tqi_burstTime;
543 	qinfo->tqi_readyTime = qi->tqi_readyTime;
544 
545 	return true;
546 }
547 
/*
 * Allocate and initialize a TX queue of the given type.
 *
 * Fixed-purpose queues map to fixed slots (beacon = last, CAB =
 * last-1, UAPSD = last-2, PS-poll = 1); data queues take the first
 * inactive slot.  With qinfo == NULL the queue gets default interrupt
 * flags and timing parameters, otherwise the supplied parameters are
 * applied via ath9k_hw_set_txq_props().
 *
 * Returns the queue number, or -1 on failure (no free queue, bad
 * type, or queue already active).
 */
int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		/* Take the first inactive slot. */
		for (q = 0; q < pCap->total_queues; q++)
			if (ahp->ah_txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"no available tx queue\n");
			return -1;
		}
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "bad tx queue type %u\n", type);
		return -1;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"tx queue %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		/* No caller parameters: use driver defaults. */
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
613 
/*
 * Free TX queue q: mark it inactive, remove it from every per-queue
 * interrupt mask and push the updated masks to hardware.
 * Returns false if q is out of range or already inactive.
 */
bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
		return false;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "release queue %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	/* Drop this queue from all TX interrupt masks. */
	ahp->ah_txOkInterruptMask &= ~(1 << q);
	ahp->ah_txErrInterruptMask &= ~(1 << q);
	ahp->ah_txDescInterruptMask &= ~(1 << q);
	ahp->ah_txEolInterruptMask &= ~(1 << q);
	ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
642 
/*
 * Push the software TX queue configuration for queue q into hardware:
 * local IFS (CWmin/CWmax/AIFS), retry limits, CBR/ready-time/burst
 * registers, type-specific QMISC/DMISC bits, and finally the per-queue
 * interrupt masks.
 *
 * Returns true on success (including for an inactive queue, which is
 * a no-op), false if q is out of range.
 */
bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_channel *chan = ah->ah_curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
		/* Nothing to program; treated as success. */
		return true;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "reset queue %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		/* Pick the channel-appropriate default CWmin... */
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		/* ...rounded up to the next (2^n - 1) value. */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	/* Constant-bit-rate scheduling, if requested. */
	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	/* CAB queues compute their own ready time below. */
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);

	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	/* Queue-type-specific gating and backoff behavior. */
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		/*
		 * Ready time in us, shortened by the SW/DMA beacon
		 * response gap and extra SWBA backoff, scaled by 1024
		 * to hardware units.
		 */
		value = (qi->tqi_readyTime -
			 (ah->ah_config.sw_beacon_response_time -
			  ah->ah_config.dma_beacon_response_time) -
			 ah->ah_config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	/* Rebuild this queue's bit in each TX interrupt mask. */
	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
797 
/*
 * Decode the hardware RX completion status of a descriptor into
 * ds->ds_rxstat.
 *
 * Returns -EINPROGRESS if AR_RxDone is not yet set, otherwise 0.
 * The status words are copied to a stack-local struct first so the
 * decode operates on a consistent snapshot.  pa, nds and tsf are
 * accepted for API compatibility but not used here.
 */
int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
			u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	/* Snapshot the RX status words before decoding. */
	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	/* Combined and per-chain RSSI for control/extension channels. */
	ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
	ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
	ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
	ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
	ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
	ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
	ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	/* Aggregation, antenna, guard interval and channel-width flags. */
	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	/* Only classify an error when the frame is not OK. */
	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}
862 
/*
 * Prepare a descriptor to receive a frame of up to 'size' bytes.
 * ATH9K_RXDESC_INTREQ requests an interrupt on completion.  Without
 * autosleep support the whole status area is cleared; otherwise only
 * the done bit is reset.  Always returns true.
 */
bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));

	return true;
}
879 
/*
 * Abort (set=true) or re-enable (set=false) PCU frame reception.
 *
 * On abort, waits for the RX state machine (observed via AR_OBS_BUS_1)
 * to go idle; if it does not, the abort is rolled back and false is
 * returned.  Returns true otherwise.
 */
bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) {
			/* RX never went idle: undo the abort request. */
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"rx failed to go idle in 10 ms RXSM=0x%x\n", reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
906 
/* Load the RX descriptor pointer register (AR_RXDP) with rxdp. */
void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
911 
/* Enable RX DMA by writing the receive-enable bit to the command register. */
void ath9k_hw_rxena(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
916 
/*
 * Start PCU frame reception: enable MIB counters, reset ANI state,
 * then clear the RX disable/abort diagnostic bits.
 */
void ath9k_hw_startpcureceive(struct ath_hal *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
925 
/*
 * Stop PCU frame reception: set the RX disable diagnostic bit and
 * freeze the MIB counters.
 */
void ath9k_hw_stoppcurecv(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
932 
ath9k_hw_stopdmarecv(struct ath_hal * ah)933 bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
934 {
935 	REG_WRITE(ah, AR_CR, AR_CR_RXD);
936 
937 	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
938 		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
939 			"dma failed to stop in 10ms\n"
940 			"AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
941 			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
942 		return false;
943 	} else {
944 		return true;
945 	}
946 }
947