Lines matching refs: ar
49 static inline unsigned int __carl9170_get_queue(struct ar9170 *ar, in __carl9170_get_queue() argument
65 static inline unsigned int carl9170_get_queue(struct ar9170 *ar, in carl9170_get_queue() argument
68 return __carl9170_get_queue(ar, skb_get_queue_mapping(skb)); in carl9170_get_queue()
71 static bool is_mem_full(struct ar9170 *ar) in is_mem_full() argument
73 return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) > in is_mem_full()
74 atomic_read(&ar->mem_free_blocks)); in is_mem_full()
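
The check above treats the device's TX memory as a pool of fixed-size blocks and reports it as full once a worst-case frame of IEEE80211_MAX_FRAME_LEN bytes could no longer be stored. A minimal standalone sketch of the same arithmetic; the 128-byte block size and the free-block counts are illustrative, not the firmware's real values:

/* Standalone sketch of the "memory full" test: the device pool is managed
 * in fixed-size blocks and counts as full once a worst-case (maximum-length)
 * frame no longer fits. Block size and free counts are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define IEEE80211_MAX_FRAME_LEN	2352	/* 802.11 maximum frame length */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static bool is_mem_full(unsigned int mem_block_size, int mem_free_blocks)
{
	return DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, mem_block_size) >
	       mem_free_blocks;
}

int main(void)
{
	/* with 128-byte blocks, a 2352-byte frame needs 19 of them */
	printf("full with 20 free blocks: %d\n", is_mem_full(128, 20)); /* 0 */
	printf("full with 18 free blocks: %d\n", is_mem_full(128, 18)); /* 1 */
	return 0;
}
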
77 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_accounting() argument
82 atomic_inc(&ar->tx_total_queued); in carl9170_tx_accounting()
85 spin_lock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting()
93 ar->tx_stats[queue].len++; in carl9170_tx_accounting()
94 ar->tx_stats[queue].count++; in carl9170_tx_accounting()
96 mem_full = is_mem_full(ar); in carl9170_tx_accounting()
97 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting()
98 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) { in carl9170_tx_accounting()
99 ieee80211_stop_queue(ar->hw, i); in carl9170_tx_accounting()
100 ar->queue_stop_timeout[i] = jiffies; in carl9170_tx_accounting()
104 spin_unlock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting()
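
carl9170_tx_accounting() bumps the per-queue backlog and lifetime counters under tx_stats_lock, then stops every mac80211 queue whose backlog reached its limit, or all queues at once when device memory is exhausted, recording the stop time in jiffies for the janitor. A simplified userspace model of that policy; the queue count, the limit of 3 and the stopped flag are illustrative stand-ins for ieee80211_stop_queue():

/* Sketch of the per-queue TX accounting policy: each submitted frame bumps
 * its queue's counters, and a queue is stopped as soon as device memory is
 * exhausted or the queue's limit is hit. Values are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct queue_stats {
	unsigned int len;	/* frames currently backlogged */
	unsigned int count;	/* lifetime total */
	unsigned int limit;	/* stop threshold */
	bool stopped;		/* stands in for ieee80211_stop_queue() */
};

static void tx_accounting(struct queue_stats *stats, int queue, bool mem_full)
{
	int i;

	stats[queue].len++;
	stats[queue].count++;

	/* Stop any queue that hit its limit; stop all of them on mem_full. */
	for (i = 0; i < NUM_QUEUES; i++) {
		if (mem_full || stats[i].len >= stats[i].limit)
			stats[i].stopped = true;
	}
}

int main(void)
{
	struct queue_stats stats[NUM_QUEUES] = {
		{ .limit = 3 }, { .limit = 3 }, { .limit = 3 }, { .limit = 3 },
	};
	int i;

	for (i = 0; i < 3; i++)
		tx_accounting(stats, 0, false);

	printf("queue 0 stopped: %d\n", stats[0].stopped);	/* 1 */
	printf("queue 1 stopped: %d\n", stats[1].stopped);	/* 0 */
	return 0;
}
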
108 static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar, in __carl9170_get_tx_sta() argument
122 vif = rcu_dereference(ar->vif_priv[vif_id].vif); in __carl9170_get_tx_sta()
139 static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_ps_unblock() argument
145 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_ps_unblock()
151 ieee80211_sta_block_awake(ar->hw, sta, false); in carl9170_tx_ps_unblock()
157 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_accounting_free() argument
163 spin_lock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting_free()
165 ar->tx_stats[queue].len--; in carl9170_tx_accounting_free()
167 if (!is_mem_full(ar)) { in carl9170_tx_accounting_free()
169 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting_free()
170 if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT) in carl9170_tx_accounting_free()
173 if (ieee80211_queue_stopped(ar->hw, i)) { in carl9170_tx_accounting_free()
176 tmp = jiffies - ar->queue_stop_timeout[i]; in carl9170_tx_accounting_free()
177 if (tmp > ar->max_queue_stop_timeout[i]) in carl9170_tx_accounting_free()
178 ar->max_queue_stop_timeout[i] = tmp; in carl9170_tx_accounting_free()
181 ieee80211_wake_queue(ar->hw, i); in carl9170_tx_accounting_free()
185 spin_unlock_bh(&ar->tx_stats_lock); in carl9170_tx_accounting_free()
187 if (atomic_dec_and_test(&ar->tx_total_queued)) in carl9170_tx_accounting_free()
188 complete(&ar->tx_flush); in carl9170_tx_accounting_free()
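
The free side mirrors it: the completed frame's queue backlog drops, and any stopped queue whose backlog fell below the soft limit is woken again while the longest observed stall is recorded; once nothing is queued at all, the tx_flush completion fires. A sketch of that wake-up logic, with plain counters instead of jiffies and an illustrative soft limit:

/* Sketch of the wake-up half of the accounting: a completed frame lowers its
 * queue's backlog, and a stopped queue is woken once it fell below the soft
 * limit, recording how long it had been stalled. Thresholds are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES		4
#define NUM_TX_LIMIT_SOFT	2	/* illustrative soft wake threshold */

struct queue_state {
	unsigned int len;
	bool stopped;
	unsigned long stopped_since;	/* "jiffies" when the queue was stopped */
	unsigned long max_stop_duration;
};

static void tx_accounting_free(struct queue_state *q, int queue,
			       unsigned int *total_queued, bool mem_full,
			       unsigned long now)
{
	int i;

	q[queue].len--;

	if (!mem_full) {
		for (i = 0; i < NUM_QUEUES; i++) {
			if (q[i].len >= NUM_TX_LIMIT_SOFT)
				continue;	/* still too full, stay stopped */

			if (q[i].stopped) {
				unsigned long stalled = now - q[i].stopped_since;

				if (stalled > q[i].max_stop_duration)
					q[i].max_stop_duration = stalled;
				q[i].stopped = false;	/* ieee80211_wake_queue() */
			}
		}
	}

	if (--(*total_queued) == 0)
		printf("tx flush complete\n");	/* complete(&ar->tx_flush) */
}

int main(void)
{
	struct queue_state q[NUM_QUEUES] = {
		{ .len = 2, .stopped = true, .stopped_since = 100 },
	};
	unsigned int total = 3;

	tx_accounting_free(q, 0, &total, false, 150);
	printf("queue 0 stopped: %d, stalled for %lu\n",
	       q[0].stopped, q[0].max_stop_duration);	/* 0, 50 */
	return 0;
}
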
191 static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb) in carl9170_alloc_dev_space() argument
197 atomic_inc(&ar->mem_allocs); in carl9170_alloc_dev_space()
199 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); in carl9170_alloc_dev_space()
200 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { in carl9170_alloc_dev_space()
201 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
205 spin_lock_bh(&ar->mem_lock); in carl9170_alloc_dev_space()
206 cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0); in carl9170_alloc_dev_space()
207 spin_unlock_bh(&ar->mem_lock); in carl9170_alloc_dev_space()
210 atomic_add(chunks, &ar->mem_free_blocks); in carl9170_alloc_dev_space()
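
carl9170_alloc_dev_space() reserves device memory optimistically: the number of blocks needed for the frame is subtracted from the free counter first, and only if the counter went negative is the reservation rolled back and the allocation failed. A sketch of that pattern using C11 atomics in place of the kernel's atomic_t; the pool size and block size are illustrative:

/* Sketch of the optimistic block reservation: grab the blocks first, then
 * hand them back if the pool went negative. C11 atomics stand in for the
 * kernel's atomic_sub_return()/atomic_add(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static atomic_int mem_free_blocks = 4;		/* illustrative pool size */

static bool reserve_dev_space(unsigned int frame_len, unsigned int block_size)
{
	int chunks = DIV_ROUND_UP(frame_len, block_size);

	/* Optimistically take the blocks... */
	if (atomic_fetch_sub(&mem_free_blocks, chunks) - chunks < 0) {
		/* ...and return them if the counter went negative. */
		atomic_fetch_add(&mem_free_blocks, chunks);
		return false;
	}
	return true;
}

int main(void)
{
	printf("512-byte frame reserved: %d\n", reserve_dev_space(512, 256));  /* 1 */
	printf("1024-byte frame reserved: %d\n", reserve_dev_space(1024, 256)); /* 0 */
	printf("free blocks left: %d\n", atomic_load(&mem_free_blocks));        /* 2 */
	return 0;
}
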
227 static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb) in carl9170_release_dev_space() argument
251 WARN_ON_ONCE(cookie > ar->fw.mem_blocks))) in carl9170_release_dev_space()
254 atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size), in carl9170_release_dev_space()
255 &ar->mem_free_blocks); in carl9170_release_dev_space()
257 spin_lock_bh(&ar->mem_lock); in carl9170_release_dev_space()
258 bitmap_release_region(ar->mem_bitmap, cookie - 1, 0); in carl9170_release_dev_space()
259 spin_unlock_bh(&ar->mem_lock); in carl9170_release_dev_space()
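
The cookie - 1 in the release path and the upper-bound check against ar->fw.mem_blocks suggest the cookie carried by each frame is the claimed bitmap index plus one, so that zero can mean "no device memory attached". A sketch of that convention, assuming exactly this off-by-one scheme, with a single 32-bit word standing in for ar->mem_bitmap:

/* Sketch of the cookie convention suggested by the lines above: each uploaded
 * frame claims a free bit and carries bit_index + 1 as its cookie; releasing
 * clears bit (cookie - 1), and cookie 0 means "nothing to release". */
#include <stdint.h>
#include <stdio.h>

#define MEM_BLOCKS 32

static uint32_t mem_bitmap;

/* Claim the lowest free bit; return cookie = index + 1, or 0 if none free. */
static unsigned int cookie_alloc(void)
{
	unsigned int bit;

	for (bit = 0; bit < MEM_BLOCKS; bit++) {
		if (!(mem_bitmap & (1u << bit))) {
			mem_bitmap |= 1u << bit;
			return bit + 1;
		}
	}
	return 0;
}

static void cookie_release(unsigned int cookie)
{
	if (!cookie || cookie > MEM_BLOCKS)
		return;			/* mirrors the WARN_ON_ONCE() checks */
	mem_bitmap &= ~(1u << (cookie - 1));
}

int main(void)
{
	unsigned int a = cookie_alloc();	/* 1 */
	unsigned int b = cookie_alloc();	/* 2 */

	cookie_release(a);
	printf("a=%u b=%u next=%u\n", a, b, cookie_alloc()); /* next reuses 1 */
	return 0;
}
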
265 struct ar9170 *ar; in carl9170_tx_release() local
275 ar = arinfo->ar; in carl9170_tx_release()
276 if (WARN_ON_ONCE(!ar)) in carl9170_tx_release()
286 if (atomic_read(&ar->tx_total_queued)) in carl9170_tx_release()
287 ar->tx_schedule = true; in carl9170_tx_release()
290 if (!atomic_read(&ar->tx_ampdu_upload)) in carl9170_tx_release()
291 ar->tx_ampdu_schedule = true; in carl9170_tx_release()
317 ieee80211_free_txskb(ar->hw, skb); in carl9170_tx_release()
328 ieee80211_tx_status_irqsafe(ar->hw, skb); in carl9170_tx_release()
347 static void carl9170_tx_shift_bm(struct ar9170 *ar, in carl9170_tx_shift_bm() argument
379 static void carl9170_tx_status_process_ampdu(struct ar9170 *ar, in carl9170_tx_status_process_ampdu() argument
395 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_status_process_ampdu()
408 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr)); in carl9170_tx_status_process_ampdu()
439 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, in carl9170_tx_status() argument
444 carl9170_tx_accounting_free(ar, skb); in carl9170_tx_status()
451 ar->tx_ack_failures++; in carl9170_tx_status()
454 carl9170_tx_status_process_ampdu(ar, skb, txinfo); in carl9170_tx_status()
456 carl9170_tx_ps_unblock(ar, skb); in carl9170_tx_status()
461 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_callback() argument
465 atomic_dec(&ar->tx_total_pending); in carl9170_tx_callback()
468 atomic_dec(&ar->tx_ampdu_upload); in carl9170_tx_callback()
471 tasklet_hi_schedule(&ar->usb_tasklet); in carl9170_tx_callback()
474 static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie, in carl9170_get_queued_skb() argument
489 carl9170_release_dev_space(ar, skb); in carl9170_get_queued_skb()
497 static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix, in carl9170_tx_fill_rateinfo() argument
519 static void carl9170_check_queue_stop_timeout(struct ar9170 *ar) in carl9170_check_queue_stop_timeout() argument
527 for (i = 0; i < ar->hw->queues; i++) { in carl9170_check_queue_stop_timeout()
528 spin_lock_bh(&ar->tx_status[i].lock); in carl9170_check_queue_stop_timeout()
530 skb = skb_peek(&ar->tx_status[i]); in carl9170_check_queue_stop_timeout()
543 spin_unlock_bh(&ar->tx_status[i].lock); in carl9170_check_queue_stop_timeout()
560 carl9170_restart(ar, CARL9170_RR_STUCK_TX); in carl9170_check_queue_stop_timeout()
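
The watchdog peeks the oldest frame still waiting on a firmware status response in each queue and, if it has been outstanding too long, assumes the device is wedged and requests a restart (CARL9170_RR_STUCK_TX above). A sketch of that check; the timeout value and the plain tick counter replacing jiffies are illustrative:

/* Sketch of the stuck-TX watchdog: if the head of a status queue has been
 * outstanding longer than a timeout, a full device restart is requested. */
#include <stdbool.h>
#include <stdio.h>

#define STUCK_TIMEOUT 256	/* illustrative, in "ticks" */

struct status_queue {
	bool has_frame;			/* anything awaiting a status? */
	unsigned long oldest_queued_at;	/* tick when the head frame went out */
};

static bool queue_stuck(const struct status_queue *q, unsigned long now)
{
	return q->has_frame && (now - q->oldest_queued_at) > STUCK_TIMEOUT;
}

int main(void)
{
	struct status_queue q = { .has_frame = true, .oldest_queued_at = 1000 };

	printf("stuck at t=1100: %d\n", queue_stuck(&q, 1100));	/* 0 */
	printf("stuck at t=2000: %d\n", queue_stuck(&q, 2000));	/* 1 -> restart */
	return 0;
}
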
564 static void carl9170_tx_ampdu_timeout(struct ar9170 *ar) in carl9170_tx_ampdu_timeout() argument
573 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { in carl9170_tx_ampdu_timeout()
588 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_ampdu_timeout()
602 struct ar9170 *ar = container_of(work, struct ar9170, in carl9170_tx_janitor() local
604 if (!IS_STARTED(ar)) in carl9170_tx_janitor()
607 ar->tx_janitor_last_run = jiffies; in carl9170_tx_janitor()
609 carl9170_check_queue_stop_timeout(ar); in carl9170_tx_janitor()
610 carl9170_tx_ampdu_timeout(ar); in carl9170_tx_janitor()
612 if (!atomic_read(&ar->tx_total_queued)) in carl9170_tx_janitor()
615 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, in carl9170_tx_janitor()
619 static void __carl9170_tx_process_status(struct ar9170 *ar, in __carl9170_tx_process_status() argument
629 skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]); in __carl9170_tx_process_status()
646 carl9170_tx_fill_rateinfo(ar, r, t, txinfo); in __carl9170_tx_process_status()
647 carl9170_tx_status(ar, skb, success); in __carl9170_tx_process_status()
650 void carl9170_tx_process_status(struct ar9170 *ar, in carl9170_tx_process_status() argument
662 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie, in carl9170_tx_process_status()
667 static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar, in carl9170_tx_rate_tpc_chains() argument
685 txpower = ar->power_2G_ht40; in carl9170_tx_rate_tpc_chains()
687 txpower = ar->power_5G_ht40; in carl9170_tx_rate_tpc_chains()
690 txpower = ar->power_2G_ht20; in carl9170_tx_rate_tpc_chains()
692 txpower = ar->power_5G_ht20; in carl9170_tx_rate_tpc_chains()
700 txpower = ar->power_2G_cck; in carl9170_tx_rate_tpc_chains()
702 txpower = ar->power_2G_ofdm; in carl9170_tx_rate_tpc_chains()
704 txpower = ar->power_5G_leg; in carl9170_tx_rate_tpc_chains()
713 if (ar->eeprom.tx_mask == 1) { in carl9170_tx_rate_tpc_chains()
723 *tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2); in carl9170_tx_rate_tpc_chains()
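
carl9170_tx_rate_tpc_chains() picks a calibrated power limit per band and modulation (CCK, OFDM/legacy, HT20, HT40) and caps it at the configured power level, where the factor of two presumably converts dBm into the half-dB steps the calibration data uses; the chain-mask selection driven by ar->eeprom.tx_mask is left out here. A sketch with made-up table values rather than EEPROM data:

/* Sketch of the transmit-power selection: pick the calibrated limit for the
 * band/modulation, then cap it at the configured level (dBm -> half-dB).
 * All table values are illustrative, not EEPROM data. */
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };
enum mod  { MOD_CCK, MOD_OFDM, MOD_HT20, MOD_HT40 };

/* illustrative calibrated limits, in half-dB steps */
static const unsigned int power_2G_cck  = 40;
static const unsigned int power_2G_ofdm = 36;
static const unsigned int power_5G_leg  = 32;
static const unsigned int power_2G_ht20 = 34;
static const unsigned int power_5G_ht20 = 30;
static const unsigned int power_2G_ht40 = 32;
static const unsigned int power_5G_ht40 = 28;

static unsigned int select_tpc(enum band band, enum mod mod,
			       unsigned int conf_power_level_dbm)
{
	unsigned int tpc;

	if (mod == MOD_HT40)
		tpc = (band == BAND_2GHZ) ? power_2G_ht40 : power_5G_ht40;
	else if (mod == MOD_HT20)
		tpc = (band == BAND_2GHZ) ? power_2G_ht20 : power_5G_ht20;
	else if (mod == MOD_CCK)
		tpc = power_2G_cck;
	else
		tpc = (band == BAND_2GHZ) ? power_2G_ofdm : power_5G_leg;

	/* never exceed the configured limit */
	if (tpc > conf_power_level_dbm * 2)
		tpc = conf_power_level_dbm * 2;

	return tpc;
}

int main(void)
{
	printf("2G HT40, 20 dBm cap: %u\n", select_tpc(BAND_2GHZ, MOD_HT40, 20)); /* 32 */
	printf("5G OFDM, 10 dBm cap: %u\n", select_tpc(BAND_5GHZ, MOD_OFDM, 10)); /* 20 */
	return 0;
}
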
726 static __le32 carl9170_tx_physet(struct ar9170 *ar, in carl9170_tx_physet() argument
777 carl9170_tx_rate_tpc_chains(ar, info, txrate, in carl9170_tx_physet()
786 static bool carl9170_tx_rts_check(struct ar9170 *ar, in carl9170_tx_rts_check() argument
790 switch (ar->erp_mode) { in carl9170_tx_rts_check()
810 static bool carl9170_tx_cts_check(struct ar9170 *ar, in carl9170_tx_cts_check() argument
813 switch (ar->erp_mode) { in carl9170_tx_cts_check()
829 static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_prepare() argument
857 hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)]; in carl9170_tx_prepare()
964 txrate->count = ar->hw->max_rate_tries; in carl9170_tx_prepare()
983 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) in carl9170_tx_prepare()
986 else if (carl9170_tx_cts_check(ar, txrate)) in carl9170_tx_prepare()
990 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate); in carl9170_tx_prepare()
996 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) in carl9170_tx_prepare()
998 else if (carl9170_tx_cts_check(ar, txrate)) in carl9170_tx_prepare()
1004 txc->f.phy_control = carl9170_tx_physet(ar, info, txrate); in carl9170_tx_prepare()
1008 arinfo->ar = ar; in carl9170_tx_prepare()
1017 static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb) in carl9170_set_immba() argument
1025 static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb) in carl9170_set_ampdu_params() argument
1046 if (tmp != ar->current_density) { in carl9170_set_ampdu_params()
1047 ar->current_density = tmp; in carl9170_set_ampdu_params()
1055 if (tmp != ar->current_factor) { in carl9170_set_ampdu_params()
1056 ar->current_factor = tmp; in carl9170_set_ampdu_params()
1062 static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest, in carl9170_tx_rate_check() argument
1082 static void carl9170_tx_ampdu(struct ar9170 *ar) in carl9170_tx_ampdu() argument
1090 atomic_inc(&ar->tx_ampdu_scheduler); in carl9170_tx_ampdu()
1091 ar->tx_ampdu_schedule = false; in carl9170_tx_ampdu()
1093 if (atomic_read(&ar->tx_ampdu_upload)) in carl9170_tx_ampdu()
1096 if (!ar->tx_ampdu_list_len) in carl9170_tx_ampdu()
1102 tid_info = rcu_dereference(ar->tx_ampdu_iter); in carl9170_tx_ampdu()
1109 list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) { in carl9170_tx_ampdu()
1142 if (!carl9170_tx_rate_check(ar, skb, first)) in carl9170_tx_ampdu()
1145 atomic_inc(&ar->tx_ampdu_upload); in carl9170_tx_ampdu()
1174 carl9170_set_ampdu_params(ar, skb_peek(&agg)); in carl9170_tx_ampdu()
1177 carl9170_set_immba(ar, skb_peek_tail(&agg)); in carl9170_tx_ampdu()
1179 spin_lock_bh(&ar->tx_pending[queue].lock); in carl9170_tx_ampdu()
1180 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]); in carl9170_tx_ampdu()
1181 spin_unlock_bh(&ar->tx_pending[queue].lock); in carl9170_tx_ampdu()
1182 ar->tx_schedule = true; in carl9170_tx_ampdu()
1187 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info); in carl9170_tx_ampdu()
1191 static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar, in carl9170_tx_pick_skb() argument
1205 if (carl9170_alloc_dev_space(ar, skb)) in carl9170_tx_pick_skb()
1222 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_drop() argument
1227 ar->tx_dropped++; in carl9170_tx_drop()
1231 ar9170_qmap[carl9170_get_queue(ar, skb)]); in carl9170_tx_drop()
1232 __carl9170_tx_process_status(ar, super->s.cookie, q); in carl9170_tx_drop()
1235 static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_ps_drop() argument
1242 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_ps_drop()
1255 atomic_dec(&ar->tx_ampdu_upload); in carl9170_tx_ps_drop()
1258 carl9170_release_dev_space(ar, skb); in carl9170_tx_ps_drop()
1259 carl9170_tx_status(ar, skb, false); in carl9170_tx_ps_drop()
1268 static void carl9170_tx(struct ar9170 *ar) in carl9170_tx() argument
1274 ar->tx_schedule = false; in carl9170_tx()
1276 if (unlikely(!IS_STARTED(ar))) in carl9170_tx()
1279 carl9170_usb_handle_tx_err(ar); in carl9170_tx()
1281 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx()
1282 while (!skb_queue_empty(&ar->tx_pending[i])) { in carl9170_tx()
1283 skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]); in carl9170_tx()
1287 if (unlikely(carl9170_tx_ps_drop(ar, skb))) in carl9170_tx()
1290 atomic_inc(&ar->tx_total_pending); in carl9170_tx()
1292 q = __carl9170_get_queue(ar, i); in carl9170_tx()
1297 skb_queue_tail(&ar->tx_status[q], skb); in carl9170_tx()
1309 carl9170_usb_tx(ar, skb); in carl9170_tx()
1317 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, in carl9170_tx()
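
The submission loop drains each pending queue only as far as device memory allows: a frame is dequeued once blocks could be reserved for it, parked on the status list so it can later be matched against the firmware's completion cookie, and handed to the USB layer; when reservation fails, the frame stays pending and the queue is retried after blocks are released. A compact sketch of that flow, with flat arrays for the queues and printf standing in for the USB submit:

/* Sketch of the submission loop: frames leave the pending queue only once
 * device memory could be reserved for them; otherwise draining stops and the
 * frame is retried later. Queues are flat arrays, "submit" is a printf. */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_SIZE 256
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int free_blocks = 5;

static bool reserve(unsigned int len)
{
	int chunks = DIV_ROUND_UP(len, BLOCK_SIZE);

	if (free_blocks - chunks < 0)
		return false;		/* leave the frame queued */
	free_blocks -= chunks;
	return true;
}

int main(void)
{
	/* pending frame lengths on one queue */
	unsigned int pending[] = { 300, 1200, 90 };
	unsigned int n = sizeof(pending) / sizeof(pending[0]);
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!reserve(pending[i])) {
			/* out of device memory: stop draining this queue;
			 * the frame is retried once blocks are released */
			printf("frame %u stays pending\n", i);
			break;
		}
		/* move to the status list and hand off to the USB layer */
		printf("frame %u (%u bytes) submitted\n", i, pending[i]);
	}
	return 0;
}
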
1321 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, in carl9170_tx_ampdu_queue() argument
1392 carl9170_tx_status(ar, skb, false); in carl9170_tx_ampdu_queue()
1393 ar->tx_dropped++; in carl9170_tx_ampdu_queue()
1399 struct ar9170 *ar = hw->priv; in carl9170_op_tx() local
1404 if (unlikely(!IS_STARTED(ar))) in carl9170_op_tx()
1410 if (unlikely(carl9170_tx_prepare(ar, skb))) in carl9170_op_tx()
1413 carl9170_tx_accounting(ar, skb); in carl9170_op_tx()
1425 run = carl9170_tx_ampdu_queue(ar, sta, skb); in carl9170_op_tx()
1427 carl9170_tx_ampdu(ar); in carl9170_op_tx()
1432 skb_queue_tail(&ar->tx_pending[queue], skb); in carl9170_op_tx()
1435 carl9170_tx(ar); in carl9170_op_tx()
1439 ar->tx_dropped++; in carl9170_op_tx()
1440 ieee80211_free_txskb(ar->hw, skb); in carl9170_op_tx()
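
Pulling the entry point together: a frame that arrives while the device is down or that fails preparation is dropped and freed; otherwise it is counted, then either fed to the A-MPDU path (which may immediately run the aggregation scheduler) or appended to its pending queue, and the submit loop is kicked. A control-flow sketch with illustrative stubs for each step:

/* Sketch of the transmit entry point's dispatch, based on the matched lines
 * above. All helpers are illustrative stubs, not the driver's functions. */
#include <stdbool.h>
#include <stdio.h>

struct frame {
	const char *name;
	bool aggregated;	/* stands in for the A-MPDU flag on the frame */
	bool bad;		/* makes prepare() fail, for the demo */
};

static bool prepare(const struct frame *f) { return !f->bad; }
static void account(const struct frame *f) { (void)f; }
static bool ampdu_queue(const struct frame *f)
{
	printf("%s: queued for aggregation\n", f->name);
	return true;		/* "run the aggregation scheduler now" */
}
static void tx_ampdu(void) { printf("aggregation scheduler ran\n"); }
static void tx_pending(const struct frame *f)
{
	printf("%s: appended to pending queue\n", f->name);
}
static void tx_submit(void) { printf("submit loop ran\n"); }

static void op_tx(const struct frame *f, bool started)
{
	if (!started || !prepare(f)) {
		printf("%s: dropped\n", f->name);	/* tx_dropped++, free skb */
		return;
	}

	account(f);

	if (f->aggregated) {
		if (ampdu_queue(f))
			tx_ampdu();
	} else {
		tx_pending(f);
	}

	tx_submit();
}

int main(void)
{
	op_tx(&(struct frame){ .name = "data0", .aggregated = true }, true);
	op_tx(&(struct frame){ .name = "mgmt0" }, true);
	op_tx(&(struct frame){ .name = "late0" }, false);
	return 0;
}
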
1443 void carl9170_tx_scheduler(struct ar9170 *ar) in carl9170_tx_scheduler() argument
1446 if (ar->tx_ampdu_schedule) in carl9170_tx_scheduler()
1447 carl9170_tx_ampdu(ar); in carl9170_tx_scheduler()
1449 if (ar->tx_schedule) in carl9170_tx_scheduler()
1450 carl9170_tx(ar); in carl9170_tx_scheduler()
1453 int carl9170_update_beacon(struct ar9170 *ar, const bool submit) in carl9170_update_beacon() argument
1465 cvif = rcu_dereference(ar->beacon_iter); in carl9170_update_beacon()
1467 if (ar->vifs == 0 || !cvif) in carl9170_update_beacon()
1470 list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) { in carl9170_update_beacon()
1475 if (!ar->beacon_enabled || i++) in carl9170_update_beacon()
1481 rcu_assign_pointer(ar->beacon_iter, cvif); in carl9170_update_beacon()
1483 skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif), in carl9170_update_beacon()
1492 spin_lock_bh(&ar->beacon_lock); in carl9170_update_beacon()
1498 addr = ar->fw.beacon_addr + off; in carl9170_update_beacon()
1501 if ((off + len) > ar->fw.beacon_max_len) { in carl9170_update_beacon()
1503 wiphy_err(ar->hw->wiphy, "beacon does not " in carl9170_update_beacon()
1512 wiphy_err(ar->hw->wiphy, "no support for beacons " in carl9170_update_beacon()
1523 carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains); in carl9170_update_beacon()
1552 carl9170_async_regwrite_begin(ar); in carl9170_update_beacon()
1579 spin_unlock_bh(&ar->beacon_lock); in carl9170_update_beacon()
1584 err = carl9170_bcn_ctrl(ar, cvif->id, in carl9170_update_beacon()
1596 spin_unlock_bh(&ar->beacon_lock); in carl9170_update_beacon()
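
The beacon update writes the template into the firmware's shared beacon memory with async register writes, so the per-interface offset plus the template length must fit inside ar->fw.beacon_max_len. A sketch of that bounds check, assuming the template is padded to whole 32-bit words plus the FCS before the test; sizes are illustrative:

/* Sketch of the beacon upload bounds check: the template is rounded up to
 * whole 32-bit words (it is pushed with register writes) and must fit,
 * together with its offset, inside the firmware's beacon memory window. */
#include <stdbool.h>
#include <stdio.h>

#define FCS_LEN 4

static bool beacon_fits(unsigned int off, unsigned int skb_len,
			unsigned int beacon_max_len)
{
	/* round the frame (plus FCS) up to a multiple of 4 bytes */
	unsigned int len = ((skb_len + FCS_LEN) + 3) & ~3u;

	return (off + len) <= beacon_max_len;
}

int main(void)
{
	printf("150-byte beacon at offset 0, 256-byte window: %d\n",
	       beacon_fits(0, 150, 256));	/* 1 */
	printf("150-byte beacon at offset 128: %d\n",
	       beacon_fits(128, 150, 256));	/* 0: rejected with wiphy_err() */
	return 0;
}
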