Lines matching references to the identifier wl (wlcore driver TX path, tx.c)

43 static int wl1271_set_default_wep_key(struct wl1271 *wl,  in wl1271_set_default_wep_key()  argument
50 ret = wl12xx_cmd_set_default_wep_key(wl, id, in wl1271_set_default_wep_key()
53 ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); in wl1271_set_default_wep_key()
62 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) in wl1271_alloc_tx_id() argument
66 id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc); in wl1271_alloc_tx_id()
67 if (id >= wl->num_tx_desc) in wl1271_alloc_tx_id()
70 __set_bit(id, wl->tx_frames_map); in wl1271_alloc_tx_id()
71 wl->tx_frames[id] = skb; in wl1271_alloc_tx_id()
72 wl->tx_frames_cnt++; in wl1271_alloc_tx_id()
76 void wl1271_free_tx_id(struct wl1271 *wl, int id) in wl1271_free_tx_id() argument
78 if (__test_and_clear_bit(id, wl->tx_frames_map)) { in wl1271_free_tx_id()
79 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) in wl1271_free_tx_id()
80 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); in wl1271_free_tx_id()
82 wl->tx_frames[id] = NULL; in wl1271_free_tx_id()
83 wl->tx_frames_cnt--; in wl1271_free_tx_id()
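
The two function groups above describe descriptor-id management: wl1271_alloc_tx_id() claims the first clear bit in tx_frames_map and binds the skb to that slot, while wl1271_free_tx_id() releases the bit and, if the pool had been completely full, clears the firmware-busy flag. Below is a minimal, self-contained sketch of that bitmap pattern in plain userspace C; the pool size, names and printf stand-in are illustrative assumptions, not the driver's code.

    /* Bitmap-based descriptor-id pool, loosely following the
     * wl1271_alloc_tx_id()/wl1271_free_tx_id() fragments above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TX_DESC 32                /* hypothetical pool size */

    struct tx_pool {
        uint32_t frames_map;              /* one bit per descriptor slot */
        void    *frames[NUM_TX_DESC];     /* payload bound to each slot  */
        int      frames_cnt;              /* slots currently in use      */
    };

    /* Returns a free slot id, or -1 when every descriptor is taken. */
    static int alloc_tx_id(struct tx_pool *p, void *skb)
    {
        for (int id = 0; id < NUM_TX_DESC; id++) {
            if (!(p->frames_map & (1u << id))) {
                p->frames_map |= 1u << id;
                p->frames[id] = skb;
                p->frames_cnt++;
                return id;
            }
        }
        return -1;
    }

    /* Releases a slot; in the real driver, freeing while the pool was
     * full is the point where the FW_TX_BUSY flag gets cleared. */
    static void free_tx_id(struct tx_pool *p, int id)
    {
        if (p->frames_map & (1u << id)) {
            bool was_full = (p->frames_cnt == NUM_TX_DESC);

            p->frames_map &= ~(1u << id);
            p->frames[id] = NULL;
            p->frames_cnt--;
            if (was_full)
                printf("pool no longer full, TX may resume\n");
        }
    }

    int main(void)
    {
        struct tx_pool pool = { 0 };
        int payload = 42;
        int id = alloc_tx_id(&pool, &payload);

        printf("allocated id %d\n", id);
        free_tx_id(&pool, id);
        return 0;
    }
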
88 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, in wl1271_tx_ap_update_inconnection_sta() argument
104 wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1); in wl1271_tx_ap_update_inconnection_sta()
111 wlcore_update_inconn_sta(wl, wlvif, NULL, true); in wl1271_tx_ap_update_inconnection_sta()
114 ieee80211_queue_delayed_work(wl->hw, in wl1271_tx_ap_update_inconnection_sta()
119 static void wl1271_tx_regulate_link(struct wl1271 *wl, in wl1271_tx_regulate_link() argument
129 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map); in wl1271_tx_regulate_link()
130 tx_pkts = wl->links[hlid].allocated_pkts; in wl1271_tx_regulate_link()
142 if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps && in wl1271_tx_regulate_link()
144 wl12xx_ps_link_start(wl, wlvif, hlid, true); in wl1271_tx_regulate_link()
147 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) in wl12xx_is_dummy_packet() argument
149 return wl->dummy_packet == skb; in wl12xx_is_dummy_packet()
153 static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wl12xx_tx_get_hlid_ap() argument
165 return wl->system_hlid; in wl12xx_tx_get_hlid_ap()
175 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wl12xx_tx_get_hlid() argument
181 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta); in wl12xx_tx_get_hlid()
192 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, in wlcore_calc_packet_alignment() argument
195 if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) || in wlcore_calc_packet_alignment()
196 !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)) in wlcore_calc_packet_alignment()
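
wlcore_calc_packet_alignment() appears to choose between two padding policies based on hardware quirks: pad every frame to a small word alignment, or pad it up to a full bus block. The compilable sketch below illustrates that decision; the 4-byte and 512-byte targets and the quirk names are assumptions for the example, not values read from the driver.

    #include <stdio.h>

    #define TX_ALIGN_TO      4u      /* assumed word alignment       */
    #define BUS_BLOCK_SIZE   512u    /* hypothetical SDIO block size */

    #define QUIRK_TX_PAD_LAST_FRAME   (1u << 0)
    #define QUIRK_TX_BLOCKSIZE_ALIGN  (1u << 1)

    static unsigned int align_up(unsigned int len, unsigned int to)
    {
        return (len + to - 1) / to * to;
    }

    static unsigned int calc_packet_alignment(unsigned int quirks,
                                              unsigned int packet_length)
    {
        /* Either the last frame is padded separately, or the bus does
         * not need block alignment at all: word alignment is enough. */
        if ((quirks & QUIRK_TX_PAD_LAST_FRAME) ||
            !(quirks & QUIRK_TX_BLOCKSIZE_ALIGN))
            return align_up(packet_length, TX_ALIGN_TO);

        /* Otherwise every frame is rounded up to a full bus block. */
        return align_up(packet_length, BUS_BLOCK_SIZE);
    }

    int main(void)
    {
        printf("%u\n", calc_packet_alignment(QUIRK_TX_BLOCKSIZE_ALIGN, 1500)); /* 1536 */
        printf("%u\n", calc_packet_alignment(0, 1500));                        /* 1500 */
        return 0;
    }
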
203 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wl1271_tx_allocate() argument
213 if (buf_offset + total_len > wl->aggr_buf_size) in wl1271_tx_allocate()
216 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem); in wl1271_tx_allocate()
219 id = wl1271_alloc_tx_id(wl, skb); in wl1271_tx_allocate()
223 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); in wl1271_tx_allocate()
225 if (total_blocks <= wl->tx_blocks_available) { in wl1271_tx_allocate()
228 wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks, in wl1271_tx_allocate()
233 wl->tx_blocks_available -= total_blocks; in wl1271_tx_allocate()
234 wl->tx_allocated_blocks += total_blocks; in wl1271_tx_allocate()
241 if (wl->tx_allocated_blocks == total_blocks || in wl1271_tx_allocate()
242 test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags)) in wl1271_tx_allocate()
243 wl12xx_rearm_tx_watchdog_locked(wl); in wl1271_tx_allocate()
246 wl->tx_allocated_pkts[ac]++; in wl1271_tx_allocate()
248 if (test_bit(hlid, wl->links_map)) in wl1271_tx_allocate()
249 wl->links[hlid].allocated_pkts++; in wl1271_tx_allocate()
257 wl1271_free_tx_id(wl, id); in wl1271_tx_allocate()
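
The wl1271_tx_allocate() fragments show the admission test for a frame: the number of firmware memory blocks needed is computed, the frame is accepted only if enough blocks remain, the shared budget and per-AC packet counters are updated on success, and the just-allocated descriptor id is rolled back on failure. A simplified sketch of just the budget check, with hypothetical types and no locking:

    #include <errno.h>
    #include <stdio.h>

    struct tx_budget {
        unsigned int blocks_available;   /* blocks the firmware can still accept  */
        unsigned int allocated_blocks;   /* blocks currently owned by the host    */
        unsigned int allocated_pkts[4];  /* packets in flight per access category */
    };

    static int tx_allocate(struct tx_budget *b, unsigned int total_blocks, int ac)
    {
        if (total_blocks > b->blocks_available)
            return -EBUSY;               /* firmware memory exhausted; caller
                                          * requeues the frame and retries later */

        b->blocks_available -= total_blocks;
        b->allocated_blocks += total_blocks;
        b->allocated_pkts[ac]++;
        return 0;
    }

    int main(void)
    {
        struct tx_budget b = { .blocks_available = 10 };

        printf("%d\n", tx_allocate(&b, 4, 0));   /* admitted             */
        printf("%d\n", tx_allocate(&b, 8, 0));   /* rejected with -EBUSY */
        return 0;
    }
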
263 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wl1271_tx_fill_hdr() argument
292 desc->start_time = cpu_to_le32(hosttime - wl->time_offset); in wl1271_tx_fill_hdr()
294 is_dummy = wl12xx_is_dummy_packet(wl, skb); in wl1271_tx_fill_hdr()
315 u8 session_id = wl->session_ids[hlid]; in wl1271_tx_fill_hdr()
317 if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) && in wl1271_tx_fill_hdr()
370 wlcore_hw_set_tx_desc_csum(wl, desc, skb); in wl1271_tx_fill_hdr()
371 wlcore_hw_set_tx_desc_data_len(wl, desc, skb); in wl1271_tx_fill_hdr()
375 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wl1271_prepare_tx_frame() argument
397 is_dummy = wl12xx_is_dummy_packet(wl, skb); in wl1271_prepare_tx_frame()
399 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && in wl1271_prepare_tx_frame()
413 ret = wl1271_set_default_wep_key(wl, wlvif, idx); in wl1271_prepare_tx_frame()
422 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, in wl1271_prepare_tx_frame()
427 wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid); in wl1271_prepare_tx_frame()
430 wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb); in wl1271_prepare_tx_frame()
431 wl1271_tx_regulate_link(wl, wlvif, hlid); in wl1271_prepare_tx_frame()
442 total_len = wlcore_calc_packet_alignment(wl, skb->len); in wl1271_prepare_tx_frame()
444 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); in wl1271_prepare_tx_frame()
445 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); in wl1271_prepare_tx_frame()
454 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, in wl1271_tx_enabled_rates_get() argument
461 band = wl->hw->wiphy->bands[rate_band]; in wl1271_tx_enabled_rates_get()
480 void wl1271_handle_tx_low_watermark(struct wl1271 *wl) in wl1271_handle_tx_low_watermark() argument
485 wl12xx_for_each_wlvif(wl, wlvif) { in wl1271_handle_tx_low_watermark()
487 if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i, in wl1271_handle_tx_low_watermark()
492 wlcore_wake_queue(wl, wlvif, i, in wl1271_handle_tx_low_watermark()
498 static int wlcore_select_ac(struct wl1271 *wl) in wlcore_select_ac() argument
512 if (wl->tx_queue_count[ac] && in wlcore_select_ac()
513 wl->tx_allocated_pkts[ac] < min_pkts) { in wlcore_select_ac()
515 min_pkts = wl->tx_allocated_pkts[q]; in wlcore_select_ac()
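
wlcore_select_ac() seems to implement a simple fairness rule: among the access categories that currently have frames queued, pick the one with the fewest packets already handed to the firmware. A standalone sketch of that selection, with invented queue counts:

    #include <stdio.h>

    #define NUM_TX_QUEUES 4

    static int select_ac(const unsigned int queue_count[NUM_TX_QUEUES],
                         const unsigned int allocated_pkts[NUM_TX_QUEUES])
    {
        int q = -1;
        unsigned int min_pkts = ~0u;

        /* Ties go to whichever category is visited first; the real code
         * walks the categories in its own priority order. */
        for (int ac = 0; ac < NUM_TX_QUEUES; ac++) {
            if (queue_count[ac] && allocated_pkts[ac] < min_pkts) {
                q = ac;
                min_pkts = allocated_pkts[ac];
            }
        }
        return q;                        /* -1 means nothing is queued */
    }

    int main(void)
    {
        unsigned int queued[NUM_TX_QUEUES]    = { 3, 0, 5, 2 };
        unsigned int allocated[NUM_TX_QUEUES] = { 4, 0, 1, 2 };

        printf("selected ac: %d\n", select_ac(queued, allocated));   /* 2 */
        return 0;
    }
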
522 static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl, in wlcore_lnk_dequeue() argument
530 spin_lock_irqsave(&wl->wl_lock, flags); in wlcore_lnk_dequeue()
531 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); in wlcore_lnk_dequeue()
532 wl->tx_queue_count[q]--; in wlcore_lnk_dequeue()
537 spin_unlock_irqrestore(&wl->wl_lock, flags); in wlcore_lnk_dequeue()
543 static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl, in wlcore_lnk_dequeue_high_prio() argument
547 struct wl1271_link *lnk = &wl->links[hlid]; in wlcore_lnk_dequeue_high_prio()
549 if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) { in wlcore_lnk_dequeue_high_prio()
552 wlcore_hw_lnk_low_prio(wl, hlid, lnk)) in wlcore_lnk_dequeue_high_prio()
559 return wlcore_lnk_dequeue(wl, lnk, ac); in wlcore_lnk_dequeue_high_prio()
562 static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl, in wlcore_vif_dequeue_high_prio() argument
571 start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links; in wlcore_vif_dequeue_high_prio()
574 for (i = 0; i < wl->num_links; i++) { in wlcore_vif_dequeue_high_prio()
575 h = (start_hlid + i) % wl->num_links; in wlcore_vif_dequeue_high_prio()
581 skb = wlcore_lnk_dequeue_high_prio(wl, h, ac, in wlcore_vif_dequeue_high_prio()
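
wlcore_vif_dequeue_high_prio() walks the interface's links round-robin, starting one past last_tx_hlid so that consecutive calls serve different links. The sketch below reproduces only that rotation; the link count and per-link queue bookkeeping are made up for the example.

    #include <stdio.h>

    #define NUM_LINKS 8

    struct vif_state {
        unsigned int last_tx_hlid;        /* link served on the previous call */
        unsigned int queued[NUM_LINKS];   /* frames pending per link          */
    };

    /* Returns the next link to serve, or -1 if every link queue is empty. */
    static int dequeue_next_link(struct vif_state *v)
    {
        unsigned int start = (v->last_tx_hlid + 1) % NUM_LINKS;

        for (unsigned int i = 0; i < NUM_LINKS; i++) {
            unsigned int h = (start + i) % NUM_LINKS;

            if (v->queued[h]) {
                v->queued[h]--;
                v->last_tx_hlid = h;
                return (int)h;
            }
        }
        v->last_tx_hlid = 0;
        return -1;
    }

    int main(void)
    {
        struct vif_state v = { .queued = { 2, 0, 2, 0 } };

        for (int n = 0; n < 4; n++)
            printf("served link %d\n", dequeue_next_link(&v));  /* 2 0 2 0 */
        return 0;
    }
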
597 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid) in wl1271_skb_dequeue() argument
600 struct wl12xx_vif *wlvif = wl->last_wlvif; in wl1271_skb_dequeue()
605 ac = wlcore_select_ac(wl); in wl1271_skb_dequeue()
611 wl12xx_for_each_wlvif_continue(wl, wlvif) { in wl1271_skb_dequeue()
615 skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, in wl1271_skb_dequeue()
620 wl->last_wlvif = wlvif; in wl1271_skb_dequeue()
627 skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid, in wl1271_skb_dequeue()
630 *hlid = wl->system_hlid; in wl1271_skb_dequeue()
631 wl->last_wlvif = NULL; in wl1271_skb_dequeue()
638 wl12xx_for_each_wlvif(wl, wlvif) { in wl1271_skb_dequeue()
642 skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, in wl1271_skb_dequeue()
645 wl->last_wlvif = wlvif; in wl1271_skb_dequeue()
650 if (wlvif == wl->last_wlvif) in wl1271_skb_dequeue()
657 struct wl1271_link *lnk = &wl->links[low_prio_hlid]; in wl1271_skb_dequeue()
658 skb = wlcore_lnk_dequeue(wl, lnk, ac); in wl1271_skb_dequeue()
664 wl->last_wlvif = lnk->wlvif; in wl1271_skb_dequeue()
672 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { in wl1271_skb_dequeue()
675 skb = wl->dummy_packet; in wl1271_skb_dequeue()
676 *hlid = wl->system_hlid; in wl1271_skb_dequeue()
678 spin_lock_irqsave(&wl->wl_lock, flags); in wl1271_skb_dequeue()
679 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); in wl1271_skb_dequeue()
680 wl->tx_queue_count[q]--; in wl1271_skb_dequeue()
681 spin_unlock_irqrestore(&wl->wl_lock, flags); in wl1271_skb_dequeue()
687 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wl1271_skb_queue_head() argument
693 if (wl12xx_is_dummy_packet(wl, skb)) { in wl1271_skb_queue_head()
694 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); in wl1271_skb_queue_head()
696 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); in wl1271_skb_queue_head()
699 wlvif->last_tx_hlid = (hlid + wl->num_links - 1) % in wl1271_skb_queue_head()
700 wl->num_links; in wl1271_skb_queue_head()
703 spin_lock_irqsave(&wl->wl_lock, flags); in wl1271_skb_queue_head()
704 wl->tx_queue_count[q]++; in wl1271_skb_queue_head()
707 spin_unlock_irqrestore(&wl->wl_lock, flags); in wl1271_skb_queue_head()
717 void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) in wl12xx_rearm_rx_streaming() argument
723 if (!wl->conf.rx_streaming.interval) in wl12xx_rearm_rx_streaming()
726 if (!wl->conf.rx_streaming.always && in wl12xx_rearm_rx_streaming()
727 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) in wl12xx_rearm_rx_streaming()
730 timeout = wl->conf.rx_streaming.duration; in wl12xx_rearm_rx_streaming()
731 wl12xx_for_each_wlvif_sta(wl, wlvif) { in wl12xx_rearm_rx_streaming()
733 for_each_set_bit(hlid, active_hlids, wl->num_links) { in wl12xx_rearm_rx_streaming()
745 ieee80211_queue_work(wl->hw, in wl12xx_rearm_rx_streaming()
763 int wlcore_tx_work_locked(struct wl1271 *wl) in wlcore_tx_work_locked() argument
775 if (unlikely(wl->state != WLCORE_STATE_ON)) in wlcore_tx_work_locked()
778 while ((skb = wl1271_skb_dequeue(wl, &hlid))) { in wlcore_tx_work_locked()
783 if (!wl12xx_is_dummy_packet(wl, skb)) in wlcore_tx_work_locked()
786 hlid = wl->system_hlid; in wlcore_tx_work_locked()
789 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset, in wlcore_tx_work_locked()
796 wl1271_skb_queue_head(wl, wlvif, skb, hlid); in wlcore_tx_work_locked()
798 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, in wlcore_tx_work_locked()
800 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, in wlcore_tx_work_locked()
801 wl->aggr_buf, buf_offset, true); in wlcore_tx_work_locked()
813 wl1271_skb_queue_head(wl, wlvif, skb, hlid); in wlcore_tx_work_locked()
815 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); in wlcore_tx_work_locked()
818 if (wl12xx_is_dummy_packet(wl, skb)) in wlcore_tx_work_locked()
823 wl1271_skb_queue_head(wl, wlvif, skb, hlid); in wlcore_tx_work_locked()
825 ieee80211_free_txskb(wl->hw, skb); in wlcore_tx_work_locked()
830 wl->tx_packets_count++; in wlcore_tx_work_locked()
839 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len); in wlcore_tx_work_locked()
840 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, in wlcore_tx_work_locked()
852 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) { in wlcore_tx_work_locked()
853 bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS, in wlcore_tx_work_locked()
854 wl->tx_packets_count); in wlcore_tx_work_locked()
859 wl1271_handle_tx_low_watermark(wl); in wlcore_tx_work_locked()
861 wl12xx_rearm_rx_streaming(wl, active_hlids); in wlcore_tx_work_locked()
869 struct wl1271 *wl = container_of(work, struct wl1271, tx_work); in wl1271_tx_work() local
872 mutex_lock(&wl->mutex); in wl1271_tx_work()
873 ret = wl1271_ps_elp_wakeup(wl); in wl1271_tx_work()
877 ret = wlcore_tx_work_locked(wl); in wl1271_tx_work()
879 wl12xx_queue_recovery_work(wl); in wl1271_tx_work()
883 wl1271_ps_elp_sleep(wl); in wl1271_tx_work()
885 mutex_unlock(&wl->mutex); in wl1271_tx_work()
909 static void wl1271_tx_complete_packet(struct wl1271 *wl, in wl1271_tx_complete_packet() argument
922 if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) { in wl1271_tx_complete_packet()
927 skb = wl->tx_frames[id]; in wl1271_tx_complete_packet()
930 if (wl12xx_is_dummy_packet(wl, skb)) { in wl1271_tx_complete_packet()
931 wl1271_free_tx_id(wl, id); in wl1271_tx_complete_packet()
943 rate = wlcore_rate_to_idx(wl, result->rate_class_index, in wl1271_tx_complete_packet()
948 wl->stats.excessive_retries++; in wl1271_tx_complete_packet()
957 wl->stats.retry_count += result->ack_failures; in wl1271_tx_complete_packet()
963 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && in wl1271_tx_complete_packet()
978 skb_queue_tail(&wl->deferred_tx_queue, skb); in wl1271_tx_complete_packet()
979 queue_work(wl->freezable_wq, &wl->netstack_work); in wl1271_tx_complete_packet()
980 wl1271_free_tx_id(wl, result->id); in wl1271_tx_complete_packet()
984 int wlcore_tx_complete(struct wl1271 *wl) in wlcore_tx_complete() argument
986 struct wl1271_acx_mem_map *memmap = wl->target_mem_map; in wlcore_tx_complete()
992 ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result), in wlcore_tx_complete()
993 wl->tx_res_if, sizeof(*wl->tx_res_if), false); in wlcore_tx_complete()
997 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter); in wlcore_tx_complete()
1000 ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) + in wlcore_tx_complete()
1006 count = fw_counter - wl->tx_results_count; in wlcore_tx_complete()
1016 u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK; in wlcore_tx_complete()
1019 result = &(wl->tx_res_if->tx_results_queue[offset]); in wlcore_tx_complete()
1020 wl1271_tx_complete_packet(wl, result); in wlcore_tx_complete()
1022 wl->tx_results_count++; in wlcore_tx_complete()
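
wlcore_tx_complete() consumes a small result ring: the firmware exposes a free-running completion counter, the host processes fw_counter - tx_results_count new entries, and each entry is located by masking the running count with the queue-length mask. A self-contained sketch of that ring-consumption pattern (ring size and payloads hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define RESULT_QUEUE_LEN   16u
    #define RESULT_QUEUE_MASK  (RESULT_QUEUE_LEN - 1)

    struct result_ring {
        uint32_t entries[RESULT_QUEUE_LEN];  /* e.g. completed descriptor ids */
        uint32_t fw_counter;                 /* advanced by the "firmware"    */
        uint32_t results_count;              /* entries consumed by the host  */
    };

    static void tx_complete(struct result_ring *r)
    {
        /* Unsigned subtraction keeps the delta correct across wrap-around. */
        uint32_t count = r->fw_counter - r->results_count;

        while (count--) {
            uint32_t offset = r->results_count & RESULT_QUEUE_MASK;

            printf("completing descriptor %u\n", r->entries[offset]);
            r->results_count++;
        }
    }

    int main(void)
    {
        struct result_ring r = { .entries = { 7, 9, 11 }, .fw_counter = 3 };

        tx_complete(&r);    /* completes descriptors 7, 9 and 11 */
        return 0;
    }
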
1030 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) in wl1271_tx_reset_link_queues() argument
1037 struct wl1271_link *lnk = &wl->links[hlid]; in wl1271_tx_reset_link_queues()
1044 if (!wl12xx_is_dummy_packet(wl, skb)) { in wl1271_tx_reset_link_queues()
1048 ieee80211_tx_status_ni(wl->hw, skb); in wl1271_tx_reset_link_queues()
1055 spin_lock_irqsave(&wl->wl_lock, flags); in wl1271_tx_reset_link_queues()
1057 wl->tx_queue_count[i] -= total[i]; in wl1271_tx_reset_link_queues()
1061 spin_unlock_irqrestore(&wl->wl_lock, flags); in wl1271_tx_reset_link_queues()
1063 wl1271_handle_tx_low_watermark(wl); in wl1271_tx_reset_link_queues()
1067 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) in wl12xx_tx_reset_wlvif() argument
1072 for_each_set_bit(i, wlvif->links_map, wl->num_links) { in wl12xx_tx_reset_wlvif()
1076 wl1271_free_sta(wl, wlvif, i); in wl12xx_tx_reset_wlvif()
1079 wl12xx_free_link(wl, wlvif, &hlid); in wl12xx_tx_reset_wlvif()
1088 void wl12xx_tx_reset(struct wl1271 *wl) in wl12xx_tx_reset() argument
1095 if (wl1271_tx_total_queue_count(wl) != 0) { in wl12xx_tx_reset()
1096 for (i = 0; i < wl->num_links; i++) in wl12xx_tx_reset()
1097 wl1271_tx_reset_link_queues(wl, i); in wl12xx_tx_reset()
1100 wl->tx_queue_count[i] = 0; in wl12xx_tx_reset()
1108 wl1271_handle_tx_low_watermark(wl); in wl12xx_tx_reset()
1110 for (i = 0; i < wl->num_tx_desc; i++) { in wl12xx_tx_reset()
1111 if (wl->tx_frames[i] == NULL) in wl12xx_tx_reset()
1114 skb = wl->tx_frames[i]; in wl12xx_tx_reset()
1115 wl1271_free_tx_id(wl, i); in wl12xx_tx_reset()
1118 if (!wl12xx_is_dummy_packet(wl, skb)) { in wl12xx_tx_reset()
1125 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && in wl12xx_tx_reset()
1138 ieee80211_tx_status_ni(wl->hw, skb); in wl12xx_tx_reset()
1146 void wl1271_tx_flush(struct wl1271 *wl) in wl1271_tx_flush() argument
1154 mutex_lock(&wl->flush_mutex); in wl1271_tx_flush()
1156 mutex_lock(&wl->mutex); in wl1271_tx_flush()
1157 if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) { in wl1271_tx_flush()
1158 mutex_unlock(&wl->mutex); in wl1271_tx_flush()
1162 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); in wl1271_tx_flush()
1166 wl->tx_frames_cnt, in wl1271_tx_flush()
1167 wl1271_tx_total_queue_count(wl)); in wl1271_tx_flush()
1170 mutex_unlock(&wl->mutex); in wl1271_tx_flush()
1171 if (wl1271_tx_total_queue_count(wl)) in wl1271_tx_flush()
1172 wl1271_tx_work(&wl->tx_work); in wl1271_tx_flush()
1174 mutex_lock(&wl->mutex); in wl1271_tx_flush()
1176 if ((wl->tx_frames_cnt == 0) && in wl1271_tx_flush()
1177 (wl1271_tx_total_queue_count(wl) == 0)) { in wl1271_tx_flush()
1189 for (i = 0; i < wl->num_links; i++) in wl1271_tx_flush()
1190 wl1271_tx_reset_link_queues(wl, i); in wl1271_tx_flush()
1193 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); in wl1271_tx_flush()
1194 mutex_unlock(&wl->mutex); in wl1271_tx_flush()
1196 mutex_unlock(&wl->flush_mutex); in wl1271_tx_flush()
1200 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) in wl1271_tx_min_rate_get() argument
1209 void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wlcore_stop_queue_locked() argument
1213 bool stopped = !!wl->queue_stop_reasons[hwq]; in wlcore_stop_queue_locked()
1216 WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq])); in wlcore_stop_queue_locked()
1221 ieee80211_stop_queue(wl->hw, hwq); in wlcore_stop_queue_locked()
1224 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, in wlcore_stop_queue() argument
1229 spin_lock_irqsave(&wl->wl_lock, flags); in wlcore_stop_queue()
1230 wlcore_stop_queue_locked(wl, wlvif, queue, reason); in wlcore_stop_queue()
1231 spin_unlock_irqrestore(&wl->wl_lock, flags); in wlcore_stop_queue()
1234 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, in wlcore_wake_queue() argument
1240 spin_lock_irqsave(&wl->wl_lock, flags); in wlcore_wake_queue()
1243 WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq])); in wlcore_wake_queue()
1245 if (wl->queue_stop_reasons[hwq]) in wlcore_wake_queue()
1248 ieee80211_wake_queue(wl->hw, hwq); in wlcore_wake_queue()
1251 spin_unlock_irqrestore(&wl->wl_lock, flags); in wlcore_wake_queue()
1254 void wlcore_stop_queues(struct wl1271 *wl, in wlcore_stop_queues() argument
1260 spin_lock_irqsave(&wl->wl_lock, flags); in wlcore_stop_queues()
1265 &wl->queue_stop_reasons[i])); in wlcore_stop_queues()
1270 ieee80211_stop_queues(wl->hw); in wlcore_stop_queues()
1272 spin_unlock_irqrestore(&wl->wl_lock, flags); in wlcore_stop_queues()
1275 void wlcore_wake_queues(struct wl1271 *wl, in wlcore_wake_queues() argument
1281 spin_lock_irqsave(&wl->wl_lock, flags); in wlcore_wake_queues()
1286 &wl->queue_stop_reasons[i])); in wlcore_wake_queues()
1291 ieee80211_wake_queues(wl->hw); in wlcore_wake_queues()
1293 spin_unlock_irqrestore(&wl->wl_lock, flags); in wlcore_wake_queues()
1296 bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, in wlcore_is_queue_stopped_by_reason() argument
1303 spin_lock_irqsave(&wl->wl_lock, flags); in wlcore_is_queue_stopped_by_reason()
1304 stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue, in wlcore_is_queue_stopped_by_reason()
1306 spin_unlock_irqrestore(&wl->wl_lock, flags); in wlcore_is_queue_stopped_by_reason()
1311 bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl, in wlcore_is_queue_stopped_by_reason_locked() argument
1317 assert_spin_locked(&wl->wl_lock); in wlcore_is_queue_stopped_by_reason_locked()
1318 return test_bit(reason, &wl->queue_stop_reasons[hwq]); in wlcore_is_queue_stopped_by_reason_locked()
1321 bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, in wlcore_is_queue_stopped_locked() argument
1326 assert_spin_locked(&wl->wl_lock); in wlcore_is_queue_stopped_locked()
1327 return !!wl->queue_stop_reasons[hwq]; in wlcore_is_queue_stopped_locked()
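
The wlcore_stop_queue*/wlcore_wake_queue* helpers at the end of the listing keep a per-hardware-queue bitmask of stop reasons: the mac80211 queue is stopped when the first reason is set and woken only once the last reason is cleared, so independent causes (flush, low watermark, recovery) cannot wake a queue out from under each other. A minimal userspace sketch of that reason-mask pattern, with invented reason names and printf in place of the mac80211 calls:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_HW_QUEUES 4

    enum stop_reason { STOP_WATERMARK, STOP_FLUSH, STOP_FW_RESTART };

    static unsigned long stop_reasons[NUM_HW_QUEUES];

    static void stop_queue(int hwq, enum stop_reason reason)
    {
        bool was_stopped = stop_reasons[hwq] != 0;

        stop_reasons[hwq] |= 1ul << reason;
        if (!was_stopped)
            printf("queue %d stopped\n", hwq);   /* ieee80211_stop_queue() */
    }

    static void wake_queue(int hwq, enum stop_reason reason)
    {
        stop_reasons[hwq] &= ~(1ul << reason);
        if (!stop_reasons[hwq])
            printf("queue %d woken\n", hwq);     /* ieee80211_wake_queue() */
    }

    int main(void)
    {
        stop_queue(0, STOP_FLUSH);
        stop_queue(0, STOP_WATERMARK);   /* already stopped, nothing printed   */
        wake_queue(0, STOP_FLUSH);       /* still held by STOP_WATERMARK       */
        wake_queue(0, STOP_WATERMARK);   /* last reason cleared -> queue woken */
        return 0;
    }
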