Lines matching refs: tid — drivers/net/wireless/mediatek/mt76/agg-rx.c (RX A-MPDU reorder handling)
16 mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx) in mt76_aggr_release() argument
20 tid->head = ieee80211_sn_inc(tid->head); in mt76_aggr_release()
22 skb = tid->reorder_buf[idx]; in mt76_aggr_release()
26 tid->reorder_buf[idx] = NULL; in mt76_aggr_release()
27 tid->nframes--; in mt76_aggr_release()
32 mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, in mt76_rx_aggr_release_frames() argument
38 while (ieee80211_sn_less(tid->head, head)) { in mt76_rx_aggr_release_frames()
39 idx = tid->head % tid->size; in mt76_rx_aggr_release_frames()
40 mt76_aggr_release(tid, frames, idx); in mt76_rx_aggr_release_frames()
45 mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames) in mt76_rx_aggr_release_head() argument
47 int idx = tid->head % tid->size; in mt76_rx_aggr_release_head()
49 while (tid->reorder_buf[idx]) { in mt76_rx_aggr_release_head()
50 mt76_aggr_release(tid, frames, idx); in mt76_rx_aggr_release_head()
51 idx = tid->head % tid->size; in mt76_rx_aggr_release_head()
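The three helpers above (lines 16-51) form the release path of the per-TID reorder window: mt76_aggr_release() delivers one slot and advances head, mt76_rx_aggr_release_frames() drains every slot strictly before a target sequence number, and mt76_rx_aggr_release_head() drains the run of consecutively filled slots at the head. A minimal user-space sketch of that circular-buffer logic follows; struct frame, struct rx_tid and the SN_* macros are simplified stand-ins invented for the example, not the driver's or mac80211's real types.

#include <stdio.h>

#define SN_MASK		0xfff	/* 802.11 sequence numbers are 12 bit */
#define SN_INC(sn)	(((sn) + 1) & SN_MASK)
#define SN_LESS(a, b)	((((a) - (b)) & SN_MASK) > 0x800)	/* mirrors ieee80211_sn_less() */

#define WIN_SIZE	64

struct frame {				/* stand-in for an rx skb plus its status */
	unsigned int sn;		/* sequence number */
	unsigned long reorder_time;	/* when it was buffered */
	const char *data;
};

struct rx_tid {				/* stand-in for struct mt76_rx_tid */
	unsigned int head;		/* next expected sequence number */
	unsigned int size;		/* reorder window size (<= WIN_SIZE) */
	int nframes;			/* frames currently buffered */
	struct frame *reorder_buf[WIN_SIZE];	/* slot for sn is sn % size */
};

/* Deliver one slot (here: print it) and advance the window head. */
static void aggr_release(struct rx_tid *tid, int idx)
{
	struct frame *f = tid->reorder_buf[idx];

	tid->head = SN_INC(tid->head);
	if (!f)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	printf("deliver sn=%u %s\n", f->sn, f->data);
}

/* Drain every slot strictly before sequence number 'head'. */
static void aggr_release_frames(struct rx_tid *tid, unsigned int head)
{
	while (SN_LESS(tid->head, head))
		aggr_release(tid, tid->head % tid->size);
}

/* Drain the run of consecutively filled slots starting at the head. */
static void aggr_release_head(struct rx_tid *tid)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		aggr_release(tid, idx);
		idx = tid->head % tid->size;
	}
}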
56 mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames) in mt76_rx_aggr_check_release() argument
62 if (!tid->nframes) in mt76_rx_aggr_check_release()
65 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_check_release()
67 start = tid->head % tid->size; in mt76_rx_aggr_check_release()
68 nframes = tid->nframes; in mt76_rx_aggr_check_release()
70 for (idx = (tid->head + 1) % tid->size; in mt76_rx_aggr_check_release()
72 idx = (idx + 1) % tid->size) { in mt76_rx_aggr_check_release()
73 skb = tid->reorder_buf[idx]; in mt76_rx_aggr_check_release()
81 mt76_aggr_tid_to_timeo(tid->num))) in mt76_rx_aggr_check_release()
84 mt76_rx_aggr_release_frames(tid, frames, status->seqno); in mt76_rx_aggr_check_release()
87 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_check_release()
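mt76_rx_aggr_check_release() (lines 56-87) is the timeout path: after draining the head it walks the remaining slots and, whenever a buffered frame has waited longer than the per-TID timeout, it force-releases everything up to that frame's sequence number, deliberately skipping the hole. A sketch of that scan on top of the rx_tid model above, with a caller-supplied 'now' and a fixed timeout constant standing in for jiffies and mt76_aggr_tid_to_timeo():

#define REORDER_TIMEOUT	100	/* model time units, not jiffies */

/* Flush frames that have been stuck behind a hole for too long. */
static void aggr_check_release(struct rx_tid *tid, unsigned long now)
{
	struct frame *f;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	aggr_release_head(tid);		/* deliver whatever is already in order */

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		f = tid->reorder_buf[idx];
		if (!f)
			continue;

		nframes--;
		if (now - f->reorder_time < REORDER_TIMEOUT)
			continue;

		/* This frame timed out: give up on the missing frames before
		 * it and move the window head up to its sequence number.
		 */
		aggr_release_frames(tid, f->sn);
	}

	aggr_release_head(tid);		/* the timed-out frame itself goes out here */
}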
93 struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid, in mt76_rx_aggr_reorder_work() local
95 struct mt76_dev *dev = tid->dev; in mt76_rx_aggr_reorder_work()
104 spin_lock(&tid->lock); in mt76_rx_aggr_reorder_work()
105 mt76_rx_aggr_check_release(tid, &frames); in mt76_rx_aggr_reorder_work()
106 nframes = tid->nframes; in mt76_rx_aggr_reorder_work()
107 spin_unlock(&tid->lock); in mt76_rx_aggr_reorder_work()
110 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, in mt76_rx_aggr_reorder_work()
111 mt76_aggr_tid_to_timeo(tid->num)); in mt76_rx_aggr_reorder_work()
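The work handler (lines 93-111) runs that timeout check under the TID lock and re-arms itself only while frames are still buffered; otherwise it stays idle until the RX path queues it again. A generic kernel-style sketch of that self-rearming delayed-work pattern, using the plain workqueue API instead of the driver's ieee80211_queue_delayed_work() and a made-up reorder_ctx type:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct reorder_ctx {			/* hypothetical context, for illustration */
	spinlock_t lock;
	int nframes;
	struct delayed_work reorder_work;
};

static void reorder_work_fn(struct work_struct *work)
{
	struct reorder_ctx *ctx = container_of(work, struct reorder_ctx,
					       reorder_work.work);
	int nframes;

	spin_lock_bh(&ctx->lock);
	/* flush timed-out frames here, as mt76_rx_aggr_check_release() does */
	nframes = ctx->nframes;
	spin_unlock_bh(&ctx->lock);

	/* Re-arm only while something is still buffered. */
	if (nframes)
		schedule_delayed_work(&ctx->reorder_work, HZ / 10);
}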
124 struct mt76_rx_tid *tid; in mt76_rx_aggr_check_ctl() local
133 status->tid = le16_to_cpu(bar->control) >> 12; in mt76_rx_aggr_check_ctl()
135 tid = rcu_dereference(wcid->aggr[status->tid]); in mt76_rx_aggr_check_ctl()
136 if (!tid) in mt76_rx_aggr_check_ctl()
139 spin_lock_bh(&tid->lock); in mt76_rx_aggr_check_ctl()
140 if (!tid->stopped) { in mt76_rx_aggr_check_ctl()
141 mt76_rx_aggr_release_frames(tid, frames, seqno); in mt76_rx_aggr_check_ctl()
142 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_check_ctl()
144 spin_unlock_bh(&tid->lock); in mt76_rx_aggr_check_ctl()
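The control-frame hook (lines 124-144) handles an incoming BlockAckReq: the TID sits in the top four bits of the BAR control field, the flush point in bits 4-15 of the starting sequence control, and the reorder window is then released up to that sequence number unless the session is being torn down. A self-contained sketch of just the field extraction, with a hand-rolled little-endian helper and a simplified two-field layout instead of struct ieee80211_bar:

#include <stdint.h>
#include <stdio.h>

/* Little-endian 16-bit load, standing in for le16_to_cpu(). */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* Example on-air bytes: BAR control 0x5004 (TID 5, compressed bitmap),
	 * starting sequence control 0x07b0 (SSN 0x7b, fragment 0).
	 */
	const uint8_t raw[4] = { 0x04, 0x50, 0xb0, 0x07 };
	uint16_t control = get_le16(raw);
	uint16_t start_seq = get_le16(raw + 2);

	unsigned int tidno = control >> 12;		/* bits 12..15 -> 5 */
	unsigned int ssn = (start_seq & 0xfff0) >> 4;	/* bits 4..15 -> 0x7b */

	printf("BAR for tid %u, flush reorder window up to sn %u\n", tidno, ssn);

	/* The driver then looks up wcid->aggr[tidno] under RCU and, with the
	 * TID lock held and the session not stopped, calls
	 * mt76_rx_aggr_release_frames(tid, frames, ssn) followed by
	 * mt76_rx_aggr_release_head(tid, frames).
	 */
	return 0;
}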
153 struct mt76_rx_tid *tid; in mt76_rx_aggr_reorder() local
175 tid = rcu_dereference(wcid->aggr[status->tid]); in mt76_rx_aggr_reorder()
176 if (!tid) in mt76_rx_aggr_reorder()
180 spin_lock_bh(&tid->lock); in mt76_rx_aggr_reorder()
182 if (tid->stopped) in mt76_rx_aggr_reorder()
185 head = tid->head; in mt76_rx_aggr_reorder()
187 size = tid->size; in mt76_rx_aggr_reorder()
190 if (!tid->started) { in mt76_rx_aggr_reorder()
194 tid->started = true; in mt76_rx_aggr_reorder()
204 tid->head = ieee80211_sn_inc(head); in mt76_rx_aggr_reorder()
205 if (tid->nframes) in mt76_rx_aggr_reorder()
206 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_reorder()
218 mt76_rx_aggr_release_frames(tid, frames, head); in mt76_rx_aggr_reorder()
224 if (tid->reorder_buf[idx]) { in mt76_rx_aggr_reorder()
230 tid->reorder_buf[idx] = skb; in mt76_rx_aggr_reorder()
231 tid->nframes++; in mt76_rx_aggr_reorder()
232 mt76_rx_aggr_release_head(tid, frames); in mt76_rx_aggr_reorder()
234 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, in mt76_rx_aggr_reorder()
235 mt76_aggr_tid_to_timeo(tid->num)); in mt76_rx_aggr_reorder()
238 spin_unlock_bh(&tid->lock); in mt76_rx_aggr_reorder()
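mt76_rx_aggr_reorder() (lines 153-238) is the per-frame hot path: it classifies each A-MPDU subframe against the window and either delivers it, buffers it, fast-forwards the window, or drops it as stale or duplicate, then re-arms the reorder work. A sketch of that classification on top of the rx_tid model from the first example; SN_ADD/SN_SUB are added here, locking, the RCU lookup and the started/stopped bookkeeping are left out, and stale frames are simply dropped in this model:

#define SN_ADD(a, b)	(((a) + (b)) & SN_MASK)
#define SN_SUB(a, b)	(((a) - (b)) & SN_MASK)

/* Classify one incoming frame against the reorder window. */
static void aggr_reorder(struct rx_tid *tid, struct frame *f, unsigned long now)
{
	unsigned int head = tid->head;
	unsigned int size = tid->size;
	unsigned int sn = f->sn;
	int idx;

	/* Behind the head: the window has already moved past it. */
	if (SN_LESS(sn, head)) {
		printf("drop stale sn=%u\n", sn);
		return;
	}

	/* Exactly the expected frame: deliver it plus whatever it unblocks. */
	if (sn == head) {
		printf("deliver sn=%u %s\n", sn, f->data);
		tid->head = SN_INC(head);
		if (tid->nframes)
			aggr_release_head(tid);
		return;
	}

	/* Beyond the window: fast-forward so this frame becomes the last
	 * slot, force-releasing everything that falls off the front.
	 */
	if (!SN_LESS(sn, SN_ADD(head, size)))
		aggr_release_frames(tid, SN_INC(SN_SUB(sn, size)));

	idx = sn % size;

	/* Duplicate of a frame already buffered in this slot: drop it. */
	if (tid->reorder_buf[idx])
		return;

	/* Inside the window with a hole before it: buffer it, then deliver
	 * anything that is now contiguous at the head.
	 */
	f->reorder_time = now;
	tid->reorder_buf[idx] = f;
	tid->nframes++;
	printf("buffer sn=%u\n", sn);
	aggr_release_head(tid);
}

int main(void)
{
	struct rx_tid tid = { .head = 0, .size = 8 };
	struct frame f0 = { .sn = 0, .data = "A" };
	struct frame f1 = { .sn = 1, .data = "B" };
	struct frame f2 = { .sn = 2, .data = "C" };

	aggr_reorder(&tid, &f1, 0);	/* hole at sn 0: B is buffered */
	aggr_reorder(&tid, &f0, 1);	/* fills the hole: delivers A, then B */
	aggr_reorder(&tid, &f2, 2);	/* already in order: delivers C */
	return 0;
}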
244 struct mt76_rx_tid *tid; in mt76_rx_aggr_start() local
248 tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL); in mt76_rx_aggr_start()
249 if (!tid) in mt76_rx_aggr_start()
252 tid->dev = dev; in mt76_rx_aggr_start()
253 tid->head = ssn; in mt76_rx_aggr_start()
254 tid->size = size; in mt76_rx_aggr_start()
255 tid->num = tidno; in mt76_rx_aggr_start()
256 INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work); in mt76_rx_aggr_start()
257 spin_lock_init(&tid->lock); in mt76_rx_aggr_start()
259 rcu_assign_pointer(wcid->aggr[tidno], tid); in mt76_rx_aggr_start()
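mt76_rx_aggr_start() (lines 244-259) allocates the per-TID state with the reorder buffer as a trailing flexible array, sized in one overflow-checked expression by struct_size(), and publishes the fully initialised object with rcu_assign_pointer() so the RX hot path can look it up locklessly. A kernel-style sketch of that allocate-then-publish pattern; rx_session, rx_table and rx_session_start() are made-up names for the example, not mt76 API:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>

struct rx_session {			/* hypothetical per-TID state */
	spinlock_t lock;
	struct delayed_work reorder_work;
	struct rcu_head rcu_head;
	u16 head;
	u16 size;
	u16 nframes;
	bool stopped;
	struct sk_buff *reorder_buf[];	/* flexible array: one slot per window entry */
};

struct rx_table {			/* hypothetical owner of the RCU pointers */
	struct rx_session __rcu *sessions[8];
};

static int rx_session_start(struct rx_table *tbl, u8 tidno, u16 ssn, u16 size,
			    work_func_t reorder_fn)
{
	struct rx_session *s;

	/* struct_size() = sizeof(*s) + size * sizeof(s->reorder_buf[0]),
	 * with integer-overflow checking.
	 */
	s = kzalloc(struct_size(s, reorder_buf, size), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	INIT_DELAYED_WORK(&s->reorder_work, reorder_fn);
	s->head = ssn;
	s->size = size;

	/* Publish only after full initialisation: rcu_assign_pointer() orders
	 * the stores so lockless readers never see a half-built session.
	 */
	rcu_assign_pointer(tbl->sessions[tidno], s);
	return 0;
}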
265 static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) in mt76_rx_aggr_shutdown() argument
267 u16 size = tid->size; in mt76_rx_aggr_shutdown()
270 spin_lock_bh(&tid->lock); in mt76_rx_aggr_shutdown()
272 tid->stopped = true; in mt76_rx_aggr_shutdown()
273 for (i = 0; tid->nframes && i < size; i++) { in mt76_rx_aggr_shutdown()
274 struct sk_buff *skb = tid->reorder_buf[i]; in mt76_rx_aggr_shutdown()
279 tid->reorder_buf[i] = NULL; in mt76_rx_aggr_shutdown()
280 tid->nframes--; in mt76_rx_aggr_shutdown()
284 spin_unlock_bh(&tid->lock); in mt76_rx_aggr_shutdown()
286 cancel_delayed_work_sync(&tid->reorder_work); in mt76_rx_aggr_shutdown()
291 struct mt76_rx_tid *tid = NULL; in mt76_rx_aggr_stop() local
293 tid = rcu_replace_pointer(wcid->aggr[tidno], tid, in mt76_rx_aggr_stop()
295 if (tid) { in mt76_rx_aggr_stop()
296 mt76_rx_aggr_shutdown(dev, tid); in mt76_rx_aggr_stop()
297 kfree_rcu(tid, rcu_head); in mt76_rx_aggr_stop()
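The shutdown/stop pair (lines 265-297) shows the teardown ordering: atomically unpublish the RCU pointer, mark the session stopped and free any buffered frames under the lock, cancel the reorder work synchronously, and only then hand the memory to kfree_rcu() so lockless readers that still hold the old pointer stay safe. A continuation of the rx_session sketch above; tbl_mutex is an assumed writer-side lock, mirroring dev->mutex in the driver:

#include <linux/mutex.h>

static void rx_session_shutdown(struct rx_session *s)
{
	u16 i;

	spin_lock_bh(&s->lock);
	s->stopped = true;		/* the RX path checks this under the lock */
	for (i = 0; s->nframes && i < s->size; i++) {
		if (!s->reorder_buf[i])
			continue;

		dev_kfree_skb(s->reorder_buf[i]);
		s->reorder_buf[i] = NULL;
		s->nframes--;
	}
	spin_unlock_bh(&s->lock);

	/* Safe to wait here: with 'stopped' set the work will not re-arm. */
	cancel_delayed_work_sync(&s->reorder_work);
}

static void rx_session_stop(struct rx_table *tbl, u8 tidno, struct mutex *tbl_mutex)
{
	struct rx_session *s;

	/* Unpublish first: new lookups see NULL from now on. */
	s = rcu_replace_pointer(tbl->sessions[tidno], NULL,
				lockdep_is_held(tbl_mutex));
	if (!s)
		return;

	rx_session_shutdown(s);
	/* Free only after a grace period, for readers still inside
	 * rcu_dereference()-protected sections.
	 */
	kfree_rcu(s, rcu_head);
}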