/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DCF Control Unit Functions
\********************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include <linux/log2.h>

/**
 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
 *
 * Here we set up parameters for the 12 available TX queues. Note that
 * on the various registers we can usually only map the first 10 of them,
 * so basically we have 10 queues to play with. Each queue has a matching
 * QCU that controls when the queue will get triggered and multiple QCUs
 * can be mapped to a single DCU that controls the various DCF parameters
 * for the various queues. In our setup we have a 1:1 mapping between QCUs
 * and DCUs, allowing us to have different DCF settings for each queue.
 *
 * When a frame goes into a TX queue, the QCU decides when it'll trigger a
 * transmission based on various criteria (such as how much data we have
 * inside its buffer or -if it's a beacon queue- whether it's time to fire
 * up the queue based on TSF etc), the DCU adds backoff, IFSes etc and then
 * a scheduler (arbitrator) decides the priority of each QCU based on its
 * configuration (e.g. beacons are always transmitted when they leave the
 * DCU, bypassing all other frames from other queues waiting to be
 * transmitted). After a frame leaves the DCU it goes to the PCU for
 * further processing and then to the PHY for the actual transmission.
 */
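
/*
 * A minimal usage sketch (illustrative only; the subtype, AIFS/CW and
 * flag values below are hypothetical examples, not taken from the
 * driver):
 *
 *	struct ath5k_txq_info qi = {
 *		.tqi_subtype	= AR5K_WME_AC_BE,
 *		.tqi_aifs	= 2,
 *		.tqi_cw_min	= 15,
 *		.tqi_cw_max	= 1023,
 *		.tqi_flags	= AR5K_TXQ_FLAG_TXOKINT_ENABLE |
 *				  AR5K_TXQ_FLAG_TXERRINT_ENABLE,
 *	};
 *	int q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *
 *	if (q >= 0)
 *		ath5k_hw_reset_tx_queue(ah, q);
 */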


/******************\
* Helper functions *
\******************/

/**
 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 */
u32
ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
	u32 pending;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return false;

	/* XXX: How about AR5K_CFG_TXCNT? */
	if (ah->ah_version == AR5K_AR5210)
		return false;

	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
	pending &= AR5K_QCU_STS_FRMPENDCNT;

	/* It's possible to have no frames pending even if TXE
	 * is set. To indicate that the queue has not stopped,
	 * return true */
	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
		return true;

	return pending;
}

/**
 * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 */
void
ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/* For SIMR setup */
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/**
 * ath5k_cw_validate() - Make sure the given cw is valid
 * @cw_req: The contention window value to check
 *
 * Make sure cw is a power of 2 minus 1 and smaller than 1024
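 * (e.g. a requested value of 100 is rounded up to 127, i.e. 2^7 - 1)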
 */
static u16
ath5k_cw_validate(u16 cw_req)
{
	cw_req = min(cw_req, (u16)1023);

	/* Check if cw_req + 1 is a power of 2 */
	if (is_power_of_2(cw_req + 1))
		return cw_req;

	/* Check if cw_req is a power of 2 */
	if (is_power_of_2(cw_req))
		return cw_req - 1;

	/* If none of the above is correct
	 * find the closest power of 2 */
	cw_req = (u16) roundup_pow_of_two(cw_req) - 1;

	return cw_req;
}

/**
 * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 * @queue_info: The &struct ath5k_txq_info to fill
 */
int
ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
	return 0;
}

/**
 * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 * @qinfo: The &struct ath5k_txq_info to use
 *
 * Returns 0 on success or -EIO if queue is inactive
 */
int
ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
		const struct ath5k_txq_info *qinfo)
{
	struct ath5k_txq_info *qi;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	qi = &ah->ah_txq[queue];

	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	/* Copy and validate values */
	qi->tqi_type = qinfo->tqi_type;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_flags = qinfo->tqi_flags;
	/*
	 * According to the docs: Although the AIFS field is 8 bit wide,
	 * the maximum supported value is 0xFC. Setting it higher than that
	 * will cause the DCU to hang.
	 */
	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
	qi->tqi_burst_time = qinfo->tqi_burst_time;
	qi->tqi_ready_time = qinfo->tqi_ready_time;

	/* XXX: Is this supported on 5210? */
	/* XXX: Is this correct for AR5K_WME_AC_VI, VO ??? */
	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
		((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
		 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
			qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}

/**
 * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
 * @ah: The &struct ath5k_hw
 * @queue_type: One of enum ath5k_tx_queue
 * @queue_info: The &struct ath5k_txq_info to use
 *
 * Returns the queue number on success, -EINVAL on invalid arguments
 */
int
ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	/*
	 * Get queue by type
	 */
	/* 5210 only has 2 queues */
	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = queue_info->tqi_subtype;
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temporary value for
	 * the secondary interrupt mask registers on 5211+;
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}


/*******************************\
* Single QCU/DCU initialization *
\*******************************/

/**
 * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 *
 * This function is used when initializing a queue, to set
 * retry limits based on ah->ah_retry_* and the chipset used.
 */
void
ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
			     unsigned int queue)
{
	/* Single data queue on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		struct ath5k_txq_info *tq = &ah->ah_txq[queue];

		if (queue > 0)
			return;

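		/* Pack the retry limits and the initial contention window
		 * into the single AR5K_NODCU_RETRY_LMT register; AR5K_REG_SM()
		 * is the ath5k shift-and-mask helper that places each value
		 * into its register field. */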
		ath5k_hw_reg_write(ah,
			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				      AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				      AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	/* DCU on AR5211+ */
	} else {
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(ah->ah_retry_long,
				    AR5K_DCU_RETRY_LMT_RTS)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_DCU_RETRY_LMT_STA_RTS)
			| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
				      AR5K_DCU_RETRY_LMT_STA_DATA),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
	}
}

/**
 * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 *
 * Sets DCF properties for the given transmit queue on the DCU
 * and configures all queue-specific parameters.
 */
int
ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	struct ath5k_txq_info *tq = &ah->ah_txq[queue];

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Skip if queue inactive or if we are on AR5210
	 * that doesn't have QCU/DCU */
	if ((ah->ah_version == AR5K_AR5210) ||
	    (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
		return 0;

	/*
	 * Set contention window (cw_min/cw_max)
	 * and arbitrated interframe space (aifs)...
	 */
	ath5k_hw_reg_write(ah,
		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
		AR5K_QUEUE_DFS_LOCAL_IFS(queue));

	/*
	 * Set tx retry limits for this queue
	 */
	ath5k_hw_set_tx_retry_limits(ah, queue);


	/*
	 * Set misc registers
	 */

	/* Enable DCU to wait for next fragment from QCU */
	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
			     AR5K_DCU_MISC_FRAG_WAIT);

	/* On Maui and Spirit use the global seqnum on DCU */
	if (ah->ah_mac_version < AR5K_SREV_AR5211)
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				     AR5K_DCU_MISC_SEQNUM_CTL);

	/* Constant bit rate period */
	if (tq->tqi_cbr_period) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
				   AR5K_QCU_CBRCFG_INTVAL) |
				   AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
				   AR5K_QCU_CBRCFG_ORN_THRES),
				   AR5K_QUEUE_CBRCFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				     AR5K_QCU_MISC_FRSHED_CBR);

		if (tq->tqi_cbr_overflow_limit)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					     AR5K_QCU_MISC_CBR_THRES_ENABLE);
	}

	/* Ready time interval */
	if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
				   AR5K_QCU_RDYTIMECFG_INTVAL) |
				   AR5K_QCU_RDYTIMECFG_ENABLE,
				   AR5K_QUEUE_RDYTIMECFG(queue));

	if (tq->tqi_burst_time) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
				   AR5K_DCU_CHAN_TIME_DUR) |
				   AR5K_DCU_CHAN_TIME_ENABLE,
				   AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					     AR5K_QCU_MISC_RDY_VEOL_POLICY);
	}

	/* Enable/disable Post frame backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
				   AR5K_QUEUE_DFS_MISC(queue));

	/* Enable/disable fragmentation burst backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
				   AR5K_QUEUE_DFS_MISC(queue));

	/*
	 * Set registers by queue type
	 */
	switch (tq->tqi_type) {
	case AR5K_TX_QUEUE_BEACON:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				     AR5K_QCU_MISC_FRSHED_DBA_GT |
				     AR5K_QCU_MISC_CBREXP_BCN_DIS |
				     AR5K_QCU_MISC_BCN_ENABLE);

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				     (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				      AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				     AR5K_DCU_MISC_ARBLOCK_IGNORE |
				     AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				     AR5K_DCU_MISC_BCN_ENABLE);
		break;

	case AR5K_TX_QUEUE_CAB:
		/* XXX: use BCN_SENT_GT, if we can figure out how */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				     AR5K_QCU_MISC_FRSHED_DBA_GT |
				     AR5K_QCU_MISC_CBREXP_DIS |
				     AR5K_QCU_MISC_CBREXP_BCN_DIS);

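		/* Ready time for the CAB queue: subtract the software/DMA
		 * beacon response slack (plus the additional SWBA backoff)
		 * and scale by 1024, which appears to convert TU to usec
		 * (assumption; see the AR5K_TUNE_* values in ath5k.h). */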
		ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
					(AR5K_TUNE_SW_BEACON_RESP -
					 AR5K_TUNE_DMA_BEACON_RESP) -
					 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
					 AR5K_QCU_RDYTIMECFG_ENABLE,
					 AR5K_QUEUE_RDYTIMECFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				     (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				      AR5K_DCU_MISC_ARBLOCK_CTL_S));
		break;

	case AR5K_TX_QUEUE_UAPSD:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				     AR5K_QCU_MISC_CBREXP_DIS);
		break;

	case AR5K_TX_QUEUE_DATA:
	default:
		break;
	}

	/* TODO: Handle frame compression */

	/*
	 * Enable interrupts for this tx queue
	 * in the secondary interrupt mask registers
	 */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

	/* Update secondary interrupt mask registers */

	/* Filter out inactive queues */
	ah->ah_txq_imr_txok &= ah->ah_txq_status;
	ah->ah_txq_imr_txerr &= ah->ah_txq_status;
	ah->ah_txq_imr_txurn &= ah->ah_txq_status;
	ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
	ah->ah_txq_imr_txeol &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
	ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
	ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
			   AR5K_SIMR0_QCU_TXOK) |
			   AR5K_REG_SM(ah->ah_txq_imr_txdesc,
			   AR5K_SIMR0_QCU_TXDESC),
			   AR5K_SIMR0);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
			   AR5K_SIMR1_QCU_TXERR) |
			   AR5K_REG_SM(ah->ah_txq_imr_txeol,
			   AR5K_SIMR1_QCU_TXEOL),
			   AR5K_SIMR1);

	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
	AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
	AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
			     AR5K_REG_SM(ah->ah_txq_imr_txurn,
					 AR5K_SIMR2_QCU_TXURN));

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
			   AR5K_SIMR3_QCBRORN) |
			   AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
			   AR5K_SIMR3_QCBRURN),
			   AR5K_SIMR3);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
			   AR5K_SIMR4_QTRIG), AR5K_SIMR4);

	/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
			   AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);

	/* If no queue has TXNOFRM enabled, disable the interrupt
	 * by setting AR5K_TXNOFRM to zero */
	if (ah->ah_txq_imr_nofrm == 0)
		ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

	/* Set QCU mask for this DCU to save power */
	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);

	return 0;
}


/**************************\
* Global QCU/DCU functions *
\**************************/

/**
 * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
 * @ah: The &struct ath5k_hw
 * @slot_time: Slot time in us
 *
 * Sets the global IFS intervals on DCU (also works on AR5210) for
 * the given slot time and the current bwmode.
 */
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;
	enum nl80211_band band;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
	u32 rate_flags, i;

	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	sifs = ath5k_hw_get_default_sifs(ah);
	sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);

	/* EIFS
	 * Txtime of ack at lowest rate + SIFS + DIFS
	 * (DIFS = SIFS + 2 * Slot time)
	 *
	 * Note: HAL has some predefined values for EIFS
	 * Turbo:   (37 + 2 * 6)
	 * Default: (74 + 2 * 9)
	 * Half:    (149 + 2 * 13)
	 * Quarter: (298 + 2 * 21)
	 *
	 * (74 + 2 * 6) for AR5210 default and turbo!
	 *
	 * According to the formula we have
	 * ack_tx_time = 25 for turbo and
	 * ack_tx_time = 42.5 * clock multiplier
	 * for default/half/quarter.
	 *
	 * This can't be right; 42 is what we would get
	 * from ath5k_hw_get_frame_dur_for_bwmode or
	 * ieee80211_generic_frame_duration for zero frame
	 * length and without SIFS!
	 *
	 * Also we have a different lowest rate for 802.11a
	 */
	if (channel->band == NL80211_BAND_5GHZ)
		band = NL80211_BAND_5GHZ;
	else
		band = NL80211_BAND_2GHZ;

	switch (ah->ah_bwmode) {
	case AR5K_BWMODE_5MHZ:
		rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
		break;
	case AR5K_BWMODE_10MHZ:
		rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
		break;
	default:
		rate_flags = 0;
		break;
	}
	sband = &ah->sbands[band];
	rate = NULL;
	for (i = 0; i < sband->n_bitrates; i++) {
		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;
		rate = &sband->bitrates[i];
		break;
	}
	if (WARN_ON(!rate))
		return -EINVAL;

	ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);

	/* ack_tx_time includes an SIFS already */
	eifs = ack_tx_time + sifs + 2 * slot_time;
	eifs_clock = ath5k_hw_htoclock(ah, eifs);

	/* Set IFS settings on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 pifs, pifs_clock, difs, difs_clock;

		/* Set slot time */
		ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);

		/* Set EIFS */
		eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);

		/* PIFS = Slot time + SIFS */
		pifs = slot_time + sifs;
		pifs_clock = ath5k_hw_htoclock(ah, pifs);
		pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);

		/* DIFS = SIFS + 2 * Slot time */
		difs = sifs + 2 * slot_time;
		difs_clock = ath5k_hw_htoclock(ah, difs);

		/* Set SIFS/DIFS */
		ath5k_hw_reg_write(ah, (difs_clock <<
				   AR5K_IFS0_DIFS_S) | sifs_clock,
				   AR5K_IFS0);

		/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
		ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
				   (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
				   AR5K_IFS1);

		return 0;
	}

	/* Set IFS slot time */
	ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);

	/* Set EIFS interval */
	ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);

	/* Set SIFS interval in usecs */
	AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
			    AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
			    sifs);

	/* Set SIFS interval in clock cycles */
	ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);

	return 0;
}


/**
 * ath5k_hw_init_queues() - Initialize tx queues
 * @ah: The &struct ath5k_hw
 *
 * Initializes all tx queues based on the information in
 * ah->ah_txq* set by the driver
 */
int
ath5k_hw_init_queues(struct ath5k_hw *ah)
{
	int i, ret;

	/* TODO: HW Compression support for data queues */
	/* TODO: Burst prefetch for data queues */

	/*
	 * Reset queues and start beacon timers at the end of the reset routine.
	 * This also sets the QCU mask on each DCU for a 1:1 QCU-to-DCU mapping.
	 * Note: If we want, we can assign multiple QCUs to one DCU.
	 */
	if (ah->ah_version != AR5K_AR5210)
		for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
			ret = ath5k_hw_reset_tx_queue(ah, i);
			if (ret) {
				ATH5K_ERR(ah,
					"failed to reset TX queue #%d\n", i);
				return ret;
			}
		}
	else
		/* No QCU/DCU on AR5210, just set tx
		 * retry limits. We set IFS parameters
		 * in ath5k_hw_set_ifs_intervals */
		ath5k_hw_set_tx_retry_limits(ah, 0);

	/* Set the turbo flag when operating on 40MHz */
	if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
		AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
				     AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);

	/* If we didn't set IFS timings through
	 * ath5k_hw_set_coverage_class, make sure
	 * we set them here */
	if (!ah->ah_coverage_class) {
		unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
		ath5k_hw_set_ifs_intervals(ah, slot_time);
	}

	return 0;
}