Lines matching refs:q
23 struct mt76_queue *q = &dev->q_rx[qid]; in mt76s_alloc_rx_queue() local
25 spin_lock_init(&q->lock); in mt76s_alloc_rx_queue()
26 q->entry = devm_kcalloc(dev->dev, in mt76s_alloc_rx_queue()
27 MT_NUM_RX_ENTRIES, sizeof(*q->entry), in mt76s_alloc_rx_queue()
29 if (!q->entry) in mt76s_alloc_rx_queue()
32 q->ndesc = MT_NUM_RX_ENTRIES; in mt76s_alloc_rx_queue()
33 q->head = q->tail = 0; in mt76s_alloc_rx_queue()
34 q->queued = 0; in mt76s_alloc_rx_queue()
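
The matches above cover nearly all of mt76s_alloc_rx_queue(), which appears to come from the mt76 SDIO support code. A plausible reconstruction of the surrounding function, assuming the elided lines are the usual GFP_KERNEL allocation flag and the -ENOMEM/0 return paths:

static int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);		/* assumed flag */
	if (!q->entry)
		return -ENOMEM;				/* assumed error path */

	/* empty ring: producer (head) and consumer (tail) coincide */
	q->ndesc = MT_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}

Note the RX queues are embedded in dev->q_rx[], so only the entry array needs allocating; devm_kcalloc ties its lifetime to the device.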
41 struct mt76_queue *q; in mt76s_alloc_tx() local
45 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); in mt76s_alloc_tx()
46 if (!q) in mt76s_alloc_tx()
49 spin_lock_init(&q->lock); in mt76s_alloc_tx()
50 q->hw_idx = i; in mt76s_alloc_tx()
51 dev->q_tx[i] = q; in mt76s_alloc_tx()
53 q->entry = devm_kcalloc(dev->dev, in mt76s_alloc_tx()
54 MT_NUM_TX_ENTRIES, sizeof(*q->entry), in mt76s_alloc_tx()
56 if (!q->entry) in mt76s_alloc_tx()
59 q->ndesc = MT_NUM_TX_ENTRIES; in mt76s_alloc_tx()
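
mt76s_alloc_tx() repeats the same setup once per hardware TX queue, but here q itself is heap-allocated (devm_kzalloc) because dev->q_tx[] holds pointers rather than embedded structs. A sketch of the loop body, with the loop bound and error returns assumed:

	for (i = 0; i <= MT_TXQ_PSD; i++) {	/* assumed loop bound */
		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;		/* assumed error path */

		spin_lock_init(&q->lock);
		q->hw_idx = i;			/* queue index doubles as hw index */
		dev->q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
	}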
93 mt76s_get_next_rx_entry(struct mt76_queue *q) in mt76s_get_next_rx_entry() argument
97 spin_lock_bh(&q->lock); in mt76s_get_next_rx_entry()
98 if (q->queued > 0) { in mt76s_get_next_rx_entry()
99 e = &q->entry[q->tail]; in mt76s_get_next_rx_entry()
100 q->tail = (q->tail + 1) % q->ndesc; in mt76s_get_next_rx_entry()
101 q->queued--; in mt76s_get_next_rx_entry()
103 spin_unlock_bh(&q->lock); in mt76s_get_next_rx_entry()
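
mt76s_get_next_rx_entry() is the consumer side of the ring: under q->lock it takes the entry at tail, advances tail modulo the ring size, and drops the queued count. The elided lines presumably declare and return an entry pointer initialized to NULL, giving roughly:

static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;	/* assumed declaration */

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		/* consume from tail, wrap modulo ring size */
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;				/* NULL when the ring is empty */
}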
109 mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76s_process_rx_queue() argument
111 int qid = q - &dev->q_rx[MT_RXQ_MAIN]; in mt76s_process_rx_queue()
120 e = mt76s_get_next_rx_entry(q); in mt76s_process_rx_queue()
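
The qid computation in mt76s_process_rx_queue() recovers the queue index by pointer arithmetic: q points into the dev->q_rx[] array, so subtracting the address of the MT_RXQ_MAIN element yields the index relative to it. A minimal standalone illustration of the idiom (hypothetical struct, not the mt76 one):

#include <stdio.h>

struct queue { int ndesc; };

int main(void)
{
	struct queue q_rx[4];
	struct queue *q = &q_rx[2];

	/* element subtraction yields an index, not a byte offset */
	printf("qid = %td\n", q - &q_rx[0]);	/* prints 2 */
	return 0;
}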
136 struct mt76_queue *q = dev->q_tx[qid]; in mt76s_process_tx_queue() local
140 while (q->queued > 0) { in mt76s_process_tx_queue()
141 if (!q->entry[q->tail].done) in mt76s_process_tx_queue()
144 entry = q->entry[q->tail]; in mt76s_process_tx_queue()
145 q->entry[q->tail].done = false; in mt76s_process_tx_queue()
152 mt76_queue_tx_complete(dev, q, &entry); in mt76s_process_tx_queue()
155 wake = q->stopped && q->queued < q->ndesc - 8; in mt76s_process_tx_queue()
157 q->stopped = false; in mt76s_process_tx_queue()
159 if (!q->queued) in mt76s_process_tx_queue()
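
mt76s_process_tx_queue() reaps completed transmissions from the tail of the ring. The matched lines hide the tail advance, which must therefore happen inside mt76_queue_tx_complete(); lines 146-151 are not matched here and are omitted. A sketch of the reaping loop and the wake-up logic, with the elided control flow assumed:

	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;			/* oldest entry still in flight */

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		/* presumably advances q->tail and drops q->queued */
		mt76_queue_tx_complete(dev, q, &entry);
	}

	/* restart a stopped queue once at least 8 slots are free again */
	wake = q->stopped && q->queued < q->ndesc - 8;
	if (wake)				/* assumed guard around line 157 */
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);		/* assumed waiter */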
201 struct mt76_queue *q = dev->q_tx[qid]; in mt76s_tx_queue_skb() local
206 u16 idx = q->head; in mt76s_tx_queue_skb()
208 if (q->queued == q->ndesc) in mt76s_tx_queue_skb()
216 q->entry[q->head].skb = tx_info.skb; in mt76s_tx_queue_skb()
217 q->entry[q->head].buf_sz = len; in mt76s_tx_queue_skb()
221 q->head = (q->head + 1) % q->ndesc; in mt76s_tx_queue_skb()
222 q->queued++; in mt76s_tx_queue_skb()
231 struct mt76_queue *q = dev->q_tx[qid]; in mt76s_tx_queue_skb_raw() local
234 if (q->queued == q->ndesc) in mt76s_tx_queue_skb_raw()
242 spin_lock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
244 q->entry[q->head].buf_sz = len; in mt76s_tx_queue_skb_raw()
245 q->entry[q->head].skb = skb; in mt76s_tx_queue_skb_raw()
246 q->head = (q->head + 1) % q->ndesc; in mt76s_tx_queue_skb_raw()
247 q->queued++; in mt76s_tx_queue_skb_raw()
249 spin_unlock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
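
Both enqueue paths, mt76s_tx_queue_skb() and mt76s_tx_queue_skb_raw(), share the producer-side bookkeeping: refuse when the ring is full, fill the slot at head, then advance head modulo ndesc and bump queued. The skb path shows no locking of its own, presumably because the mt76 core calls the driver's tx hook with q->lock already held, whereas the raw variant takes the lock explicitly. A condensed sketch of the shared pattern:

	/* caller must hold q->lock (directly or via the mt76 core) */
	if (q->queued == q->ndesc)
		return -ENOSPC;			/* assumed error code */

	q->entry[q->head].skb = skb;		/* tx_info.skb in the skb path */
	q->entry[q->head].buf_sz = len;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;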
259 static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) in mt76s_tx_kick() argument
321 struct mt76_queue *q = &dev->q_rx[i]; in mt76s_deinit() local
324 for (j = 0; j < q->ndesc; j++) { in mt76s_deinit()
325 struct mt76_queue_entry *e = &q->entry[j]; in mt76s_deinit()
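
Teardown in mt76s_deinit() walks every RX ring and visits each of its ndesc entries; the matched lines stop before the per-entry cleanup, which presumably frees any skb still attached. A sketch of the loop, with the outer iteration and the free call assumed:

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {	/* assumed outer loop */
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);		/* assumed cleanup */
			e->skb = NULL;
		}
	}

The entry arrays themselves need no explicit free: they were obtained with devm_kcalloc, so devres releases them when the device goes away.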