/**
 * Copyright (c) 2014 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_mgmt.h"
#include "rsi_common.h"
#include "rsi_hal.h"

/**
 * rsi_determine_min_weight_queue() - This function determines the queue with
 *				      the min weight.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number.
 */
static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	u32 q_len = 0;
	u8 ii = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if ((tx_qinfo[ii].pkt_contended) && q_len) {
			common->min_weight = tx_qinfo[ii].weight;
			break;
		}
	}
	return ii;
}

/**
 * rsi_recalculate_weights() - This function recalculates the weights
 *			       corresponding to each queue.
 * @common: Pointer to the driver private structure.
 *
 * Return: true if any queue was freshly contended, false otherwise.
 */
static bool rsi_recalculate_weights(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	bool recontend_queue = false;
	u8 ii = 0;
	u32 q_len = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		/* Check for the need of contention */
		if (q_len) {
			if (tx_qinfo[ii].pkt_contended) {
				tx_qinfo[ii].weight =
					((tx_qinfo[ii].weight > common->min_weight) ?
					 tx_qinfo[ii].weight - common->min_weight : 0);
			} else {
				tx_qinfo[ii].pkt_contended = 1;
				tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
				recontend_queue = true;
			}
		} else { /* No packets so no contention */
			tx_qinfo[ii].weight = 0;
			tx_qinfo[ii].pkt_contended = 0;
		}
	}

	return recontend_queue;
}

/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *				packets to be dequeued based on the number
 *				of bytes calculated using txop.
 *
 * @common: Pointer to the driver private structure.
 * @q_num: Queue number from which packets have to be dequeued.
 *
 * Return: pkt_cnt: Number of packets to be dequeued.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
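	/* The WMM TXOP limit is expressed in units of 32 microseconds,
	 * so scale it up to microseconds here.
	 */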
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	__le16 r_txop;
	struct ieee80211_rate rate;

	rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
	if (q_num == VI_Q)
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

	do {
		r_txop = ieee80211_generic_frame_duration(adapter->hw,
							  adapter->vifs[0],
							  common->band,
							  skb->len, &rate);
		txop -= le16_to_cpu(r_txop);
		pkt_cnt += 1;
		/* Check if packets are still queued */
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;

	} while (txop > 0);

	return pkt_cnt;
}

/**
 * rsi_core_determine_hal_queue() - This function determines the queue from
 *				    which the next packet has to be dequeued.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number on success.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
	bool recontend_queue = false;
	u32 q_len = 0;
	u8 q_num = INVALID_QUEUE;
	u8 ii = 0;

	if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {
		q_num = MGMT_BEACON_Q;
		return q_num;
	}
	if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
		if (!common->mgmt_q_block)
			q_num = MGMT_SOFT_Q;
		return q_num;
	}

	if (common->hw_data_qs_blocked)
		return q_num;

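	/* A txop-based packet budget is still pending for the previously
	 * selected queue; keep draining that queue until it is exhausted.
	 */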
	if (common->pkt_cnt != 0) {
		--common->pkt_cnt;
		return common->selected_qnum;
	}

get_queue_num:
	recontend_queue = false;

	q_num = rsi_determine_min_weight_queue(common);

	ii = q_num;

	/* Selecting the queue with least back off */
	for (; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if (((common->tx_qinfo[ii].pkt_contended) &&
		     (common->tx_qinfo[ii].weight < common->min_weight)) &&
		    q_len) {
			common->min_weight = common->tx_qinfo[ii].weight;
			q_num = ii;
		}
	}

	if (q_num < NUM_EDCA_QUEUES)
		common->tx_qinfo[q_num].pkt_contended = 0;

	/* Adjust the back off values for all queues again */
	recontend_queue = rsi_recalculate_weights(common);

	q_len = skb_queue_len(&common->tx_queue[q_num]);
	if (!q_len) {
		/* If any queues are freshly contended and the selected queue
		 * doesn't have any packets, then get the queue number again
		 * with fresh values.
		 */
		if (recontend_queue)
			goto get_queue_num;

		q_num = INVALID_QUEUE;
		return q_num;
	}

	common->selected_qnum = q_num;
	q_len = skb_queue_len(&common->tx_queue[q_num]);

	if (q_num == VO_Q || q_num == VI_Q) {
		common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
		common->pkt_cnt -= 1;
	}

	return q_num;
}

/**
 * rsi_core_queue_pkt() - This function enqueues the packet to the queue
 *			  specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
static void rsi_core_queue_pkt(struct rsi_common *common,
			       struct sk_buff *skb)
{
	u8 q_num = skb->priority;

	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		dev_kfree_skb(skb);
		return;
	}

	skb_queue_tail(&common->tx_queue[q_num], skb);
}

/**
 * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
 *			    specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @q_num: Queue number.
 *
 * Return: Pointer to sk_buff structure.
 */
static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
					    u8 q_num)
{
	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		return NULL;
	}

	return skb_dequeue(&common->tx_queue[q_num]);
}

/**
 * rsi_core_qos_processor() - This function is used to determine the wmm queue
 *			      based on the backoff procedure. Data packets are
 *			      dequeued from the selected hal queue and sent to
 *			      the lower layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}

		mutex_lock(&common->tx_lock);

		status = adapter->check_hw_queue_status(adapter, q_num);
		if (status <= 0) {
			mutex_unlock(&common->tx_lock);
			break;
		}

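		/* If a data queue has drained to the low-water mark, let
		 * mac80211 resume feeding it.
		 */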
		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		     MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			rsi_dbg(ERR_ZONE, "skb null\n");
			mutex_unlock(&common->tx_lock);
			break;
		}

		if (q_num == MGMT_SOFT_Q) {
			status = rsi_send_mgmt_pkt(common, skb);
		} else if (q_num == MGMT_BEACON_Q) {
			status = rsi_send_pkt_to_bus(common, skb);
			dev_kfree_skb(skb);
		} else {
			status = rsi_send_data_pkt(common, skb);
		}

		if (status) {
			mutex_unlock(&common->tx_lock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_lock);

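		/* Yield the CPU if this loop has been running for more
		 * than roughly 300 ms.
		 */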
		if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
			schedule();
	}
}

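/**
 * rsi_find_sta() - This function searches the connected station list for an
 *		    entry matching the given MAC address.
 * @common: Pointer to the driver private structure.
 * @mac_addr: MAC address to look up.
 *
 * Return: Pointer to the matching rsi_sta entry, or NULL if none is found.
 */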
struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr)
{
	int i;

	for (i = 0; i < common->max_stations; i++) {
		if (!common->stations[i].sta)
			continue;
		if (!(memcmp(common->stations[i].sta->addr,
			     mac_addr, ETH_ALEN)))
			return &common->stations[i];
	}
	return NULL;
}

/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *wh;
	struct ieee80211_vif *vif = adapter->vifs[0];
	u8 q_num, tid = 0;
	struct rsi_sta *rsta = NULL;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}

	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	wh = (struct ieee80211_hdr *)&skb->data[0];
	tx_params->sta_id = 0;

	if ((ieee80211_is_mgmt(wh->frame_control)) ||
	    (ieee80211_is_ctl(wh->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(wh->frame_control))) {
		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;
	} else {
		if (ieee80211_is_data_qos(wh->frame_control)) {
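			/* The TID is read from the QoS control field, which
			 * follows the 3-address 802.11 header at byte
			 * offset 24.
			 */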
			tid = (skb->data[24] & IEEE80211_QOS_TID);
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}

		q_num = skb->priority;
		tx_params->tid = tid;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (!is_broadcast_ether_addr(wh->addr1)) &&
		    (!is_multicast_ether_addr(wh->addr1))) {
			rsta = rsi_find_sta(common, wh->addr1);
			if (!rsta)
				goto xmit_fail;
			tx_params->sta_id = rsta->sta_id;
		}

		if (rsta) {
			/* Start aggregation if not done for this tid */
			if (!rsta->start_tx_aggr[tid]) {
				rsta->start_tx_aggr[tid] = true;
				ieee80211_start_tx_ba_session(rsta->sta,
							      tid, 0);
			}
		}
	}

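	/* If queuing this packet would reach the high-water mark, stop the
	 * corresponding mac80211 queue and kick the TX thread to drain it.
	 */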
	if ((q_num < MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}