/*
 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
 *
 * Flow rings are transmit-traffic related entities (traffic propagating
 * towards the antenna).
 *
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions
 * of the license of that module. An independent module is a module which is
 * not derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_flowring.c 808473 2019-03-07 07:35:30Z $
 */

#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <dngl_stats.h>

#include <dhd.h>

#include <dhd_flowring.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <pcie_core.h>
#include <bcmmsgbuf.h>
#include <dhd_pcie.h>
#include <dhd_config.h>

static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);

static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
                                     char *sa, char *da);

static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
                                      uint8 prio, char *sa, char *da);

static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
                                    char *sa, char *da, uint16 *flowid);
int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);

#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))

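/*
 * prio2ac maps the eight 802.1D priorities onto the four WMM access
 * categories (0 = AC_BE, 1 = AC_BK, 2 = AC_VI, 3 = AC_VO); e.g. priorities
 * 6 and 7 both land in AC_VO, so prio2ac[6] == prio2ac[7] == 3. prio2tid is
 * the identity mapping, used when flow rings are kept per TID rather than
 * per AC (see dhd_update_flow_prio_map below).
 */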
const uint8 prio2ac[8] = {0, 1, 1, 0, 2, 2, 3, 3};
const uint8 prio2tid[8] = {0, 1, 2, 3, 4, 5, 6, 7};

/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied
 */
static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue)
{
    return DHD_FLOW_QUEUE_FULL(queue);
}

int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
{
    return BCME_NORESOURCE;
}

/** Returns flow ring given a flowid */
flow_ring_node_t *dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t *flow_ring_node;

    ASSERT(dhdp != (dhd_pub_t *)NULL);
    ASSERT(flowid < dhdp->num_flow_rings);
    if (flowid >= dhdp->num_flow_rings) {
        return NULL;
    }

    flow_ring_node = &(((flow_ring_node_t *)(dhdp->flow_ring_table))[flowid]);

    ASSERT(flow_ring_node->flowid == flowid);
    return flow_ring_node;
}

/** Returns 'backup' queue given a flowid */
flow_queue_t *dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t *flow_ring_node = NULL;

    flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
    if (flow_ring_node) {
        return &flow_ring_node->queue;
    } else {
        return NULL;
    }
}

/* Flow ring's queue management functions */

/** Reinitialize a flow ring's queue. */
void dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    queue->head = queue->tail = NULL;
    queue->len = 0;

    /* Set queue's threshold and queue's parent cumulative length counter */
    ASSERT(max > 1);
    DHD_FLOW_QUEUE_SET_MAX(queue, max);
    DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
    DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
    DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);

    queue->failures = 0U;
    queue->cb = &dhd_flow_queue_overflow;
}
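
/*
 * Flow control sketch: each backup queue is throttled at three levels, its
 * own budget (max) plus two shared "cumulative" counters, a parent counter
 * (wired here to dhdp->cumm_ctr) and a grandparent counter (dhdp->l2cumm_ctr).
 * The enqueue/dequeue helpers below increment/decrement all levels, and
 * DHD_FLOW_QUEUE_FULL() is expected to trip when any level exceeds its
 * threshold. A minimal picture, assuming the default wiring done above:
 *
 *   queue->len       vs. queue max       (per flow ring)
 *   dhdp->cumm_ctr   vs. threshold       (parent, shared across queues)
 *   dhdp->l2cumm_ctr vs. L2 threshold    (grandparent, shared device-wide)
 */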

/** Initialize a flow ring's queue, called on driver initialization. */
void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    dll_init(&queue->list);
    dhd_flow_queue_reinit(dhdp, queue, max);
}

/** Register an enqueue overflow callback handler */
void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
    ASSERT(queue != NULL);
    queue->cb = cb;
}
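
/*
 * A minimal sketch of a custom overflow handler; my_overflow_cb is
 * hypothetical. The handler is invoked from dhd_flow_queue_enqueue() when
 * the queue is throttled, and its return value becomes the enqueue result:
 *
 *   static int BCMFASTPATH my_overflow_cb(flow_queue_t *queue, void *pkt)
 *   {
 *       return BCME_NORESOURCE; // same as the default dhd_flow_queue_overflow
 *   }
 *   ...
 *   dhd_flow_queue_register(dhd_flow_queue(dhdp, flowid), my_overflow_cb);
 */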

/**
 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it
 * will travel later on to the flow ring itself.
 */
int BCMFASTPATH dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue,
                                       void *pkt)
{
    int ret = BCME_OK;

    ASSERT(queue != NULL);

    if (dhd_flow_queue_throttle(queue)) {
        queue->failures++;
        ret = (*queue->cb)(queue, pkt);
        goto done;
    }

    if (queue->head) {
        FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
    } else {
        queue->head = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);

    queue->tail = pkt; /* at tail */

    queue->len++;
    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

done:
    return ret;
}

/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
void *BCMFASTPATH dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
{
    void *pkt;

    ASSERT(queue != NULL);

    pkt = queue->head; /* from head */

    if (pkt == NULL) {
        ASSERT((queue->len == 0) && (queue->tail == NULL));
        goto done;
    }

    queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
    if (queue->head == NULL) {
        queue->tail = NULL;
    }

    queue->len--;
    /* decrement parent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* decrement grandparent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */

done:
    return pkt;
}
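
/*
 * Usage sketch (hypothetical locals): packets come back out in arrival
 * order, since enqueue appends at the tail and dequeue pops the head:
 *
 *   flow_queue_t *q = dhd_flow_queue(dhdp, flowid);
 *   dhd_flow_queue_enqueue(dhdp, q, pkt_a);
 *   dhd_flow_queue_enqueue(dhdp, q, pkt_b);
 *   void *first = dhd_flow_queue_dequeue(dhdp, q); // yields pkt_a
 *
 * These helpers do no locking of their own; callers presumably serialize
 * access, e.g. under the owning flow ring's lock.
 */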

/** Reinsert a dequeued 802.3 packet back at the head */
void BCMFASTPATH dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue,
                                         void *pkt)
{
    if (queue->head == NULL) {
        queue->tail = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
    queue->head = pkt;
    queue->len++;
    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
}

/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
                                     int queue_budget, int cumm_threshold,
                                     void *cumm_ctr, int l2cumm_threshold,
                                     void *l2cumm_ctr)
{
    flow_queue_t *queue = NULL;

    ASSERT(dhdp != (dhd_pub_t *)NULL);
    ASSERT(queue_budget > 1);
    ASSERT(cumm_threshold > 1);
    ASSERT(cumm_ctr != (void *)NULL);
    ASSERT(l2cumm_threshold > 1);
    ASSERT(l2cumm_ctr != (void *)NULL);

    queue = dhd_flow_queue(dhdp, flowid);
    if (queue) {
        DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

        /* Set the queue's parent threshold and cumulative counter */
        DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
        DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);

        /* Set the queue's grandparent threshold and cumulative counter */
        DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
        DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
    }
}
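
/*
 * Example (sketch, hypothetical numbers): give flow ring 5 a 512-packet
 * budget, and throttle it once the parent/grandparent aggregates reach
 * 1024/2048 queued packets:
 *
 *   dhd_flow_ring_config_thresholds(dhdp, 5, 512,
 *                                   1024, &dhdp->cumm_ctr,
 *                                   2048, &dhdp->l2cumm_ctr);
 */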

uint8 dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
{
    uint8 prio_count = 0;
    int i;
    // Pick all elements one by one
    for (i = 0; i < NUMPRIO; i++) {
        // Check if the picked element is already counted
        int j;
        for (j = 0; j < i; j++) {
            if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
                break;
            }
        }
        // If not counted earlier, then count it
        if (i == j) {
            prio_count++;
        }
    }

#ifdef DHD_LOSSLESS_ROAMING
    /* For LLR, we use a flowring with prio 7, which is not accounted for
     * in the prio2ac array. In __dhd_sendpkt the prio is hardcoded to
     * PRIO_8021D_NC and passed to dhd_flowid_update, so add 1 to
     * prio_count.
     */
    prio_count++;
#endif /* DHD_LOSSLESS_ROAMING */

    return prio_count;
}
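
/*
 * Worked example: with the default AC map, flow_prio_map holds
 * {0, 1, 1, 0, 2, 2, 3, 3}, i.e. 4 distinct values, so the loop above
 * counts 4; with DHD_LOSSLESS_ROAMING defined, the extra PRIO_8021D_NC
 * ring bumps the result to 5. With the TID map all 8 values are distinct.
 */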

uint8 dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
{
    uint8 reserved_infra_sta_flow_rings =
        dhd_num_prio_supported_per_flow_ring(dhdp);
    uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
    uint8 max_multi_client_flow_rings =
        total_tx_flow_rings - reserved_infra_sta_flow_rings;
    return max_multi_client_flow_rings;
}
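
/*
 * Example (hypothetical numbers): with num_flow_rings = 40 and
 * max_cmn_rings = 5, there are 35 TX flow rings in total; with the default
 * AC map, 4 of them stay reserved for the infra STA interface, leaving 31
 * rings available for multi-client (e.g. AP / P2P-GO) peers.
 */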

/** Initializes data structures of multiple flow rings */
int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
    uint32 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz = 0;
    void *flowid_allocator;
    flow_ring_table_t *flow_ring_table = NULL;
    if_flow_lkup_t *if_flow_lkup = NULL;
    void *lock = NULL;
    void *list_lock = NULL;
    unsigned long flags;

    DHD_INFO(("%s\n", __FUNCTION__));

    /* Construct a 16bit flowid allocator */
    flowid_allocator = id16_map_init(
        dhdp->osh, num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
    if (flowid_allocator == NULL) {
        DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
        return BCME_NOMEM;
    }

    /* Allocate a flow ring table, comprising the requested number of rings */
    flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
    flow_ring_table =
        (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
    if (flow_ring_table == NULL) {
        DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize flow ring table state */
    DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
    DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
    bzero((uchar *)flow_ring_table, flow_ring_table_sz);
    for (idx = 0; idx < num_flow_rings; idx++) {
        flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
        flow_ring_table[idx].flowid = (uint16)idx;
        flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
        flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
        if (flow_ring_table[idx].lock == NULL) {
            DHD_ERROR(
                ("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
            goto fail;
        }

        dll_init(&flow_ring_table[idx].list);

        /* Initialize the per flow ring backup queue */
        dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
                            dhdp->conf->flow_ring_queue_threshold);
    }

    /* Allocate per interface hash table (for fast lookup from interface to
     * flow ring)
     */
    if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
    if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(
        dhdp, DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
    if (if_flow_lkup == NULL) {
        DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize per interface hash table */
    for (idx = 0; idx < DHD_MAX_IFS; idx++) {
        int hash_ix;
        if_flow_lkup[idx].status = 0;
        if_flow_lkup[idx].role = 0;
        for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++) {
            if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
        }
    }

    lock = dhd_os_spin_lock_init(dhdp->osh);
    if (lock == NULL) {
        goto fail;
    }

    list_lock = dhd_os_spin_lock_init(dhdp->osh);
    if (list_lock == NULL) {
        goto lock_fail;
    }

    dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
    bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings =
        dhd_get_max_multi_client_flow_rings(dhdp);
    dhdp->multi_client_flow_rings = 0U;

#ifdef DHD_LOSSLESS_ROAMING
    dhdp->dequeue_prec_map = ALLPRIO;
#endif /* DHD_LOSSLESS_ROAMING */
    /* Now populate into dhd pub */
    DHD_FLOWID_LOCK(lock, flags);
    dhdp->num_flow_rings = num_flow_rings;
    dhdp->flowid_allocator = (void *)flowid_allocator;
    dhdp->flow_ring_table = (void *)flow_ring_table;
    dhdp->if_flow_lkup = (void *)if_flow_lkup;
    dhdp->flowid_lock = lock;
    dhdp->flow_rings_inited = TRUE;
    dhdp->flowring_list_lock = list_lock;
    DHD_FLOWID_UNLOCK(lock, flags);

    DHD_INFO(("%s done\n", __FUNCTION__));
    return BCME_OK;

lock_fail:
    /* deinit the spinlock */
    dhd_os_spin_lock_deinit(dhdp->osh, lock);

fail:
    /* Destruct the per interface flow lkup table */
    if (if_flow_lkup != NULL) {
        DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
    }
    if (flow_ring_table != NULL) {
        for (idx = 0; idx < num_flow_rings; idx++) {
            if (flow_ring_table[idx].lock != NULL) {
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
            }
        }
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }
    id16_map_fini(dhdp->osh, flowid_allocator);

    return BCME_NOMEM;
}

/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
    uint16 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    void *lock;

    DHD_INFO(("dhd_flow_rings_deinit\n"));

    if (!(dhdp->flow_rings_inited)) {
        DHD_ERROR(("dhd_flow_rings not initialized!\n"));
        return;
    }

    if (dhdp->flow_ring_table != NULL) {
        ASSERT(dhdp->num_flow_rings > 0);

        DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
        flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
        dhdp->flow_ring_table = NULL;
        DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
        for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
            if (flow_ring_table[idx].active) {
                dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
            }
            ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

            /* Deinit flow ring queue locks before destroying flow ring table */
            if (flow_ring_table[idx].lock != NULL) {
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
            }
            flow_ring_table[idx].lock = NULL;
        }

        /* Destruct the flow ring table */
        flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

    /* Destruct the per interface flow lkup table */
    if (dhdp->if_flow_lkup != NULL) {
        if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
        bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
        DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
        dhdp->if_flow_lkup = NULL;
    }

    /* Destruct the flowid allocator */
    if (dhdp->flowid_allocator != NULL) {
        dhdp->flowid_allocator =
            id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
    }

    dhdp->num_flow_rings = 0U;
    bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = 0U;
    dhdp->multi_client_flow_rings = 0U;

    lock = dhdp->flowid_lock;
    dhdp->flowid_lock = NULL;

    if (lock) {
        DHD_FLOWID_UNLOCK(lock, flags);
        dhd_os_spin_lock_deinit(dhdp->osh, lock);
    }

    dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
    dhdp->flowring_list_lock = NULL;

    ASSERT(dhdp->if_flow_lkup == NULL);
    ASSERT(dhdp->flowid_allocator == NULL);
    ASSERT(dhdp->flow_ring_table == NULL);
    dhdp->flow_rings_inited = FALSE;
}

/** Maps an ifindex to the flow ring 'role' (STA/AP) via the per interface
 * lookup table
 */
uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
    if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    ASSERT(if_flow_lkup);
    return if_flow_lkup[ifindex].role;
}

#ifdef WLTDLS
bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
{
    unsigned long flags;
    tdls_peer_node_t *cur = NULL;

    DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
    cur = dhdp->peer_tbl.node;

    while (cur != NULL) {
        if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
            DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
            return TRUE;
        }
        cur = cur->next;
    }
    DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
    return FALSE;
}
#endif /* WLTDLS */

/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
                                     char *sa, char *da)
{
    int hash;
    bool ismcast = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return FLOWID_INVALID;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    ASSERT(if_flow_lkup);

    if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
#ifdef WLTDLS
        if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
            is_tdls_destination(dhdp, da)) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            while (cur != NULL) {
                if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
                    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                    return cur->flowid;
                }
                cur = cur->next;
            }
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return FLOWID_INVALID;
        }
#endif /* WLTDLS */
        /* For STA, non TDLS destinations and WDS destinations map to a flow
         * ring id based on prio only
         */
        cur = if_flow_lkup[ifindex].fl_hash[prio];
        if (cur) {
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return cur->flowid;
        }
    } else {

        if (ETHER_ISMULTI(da)) {
            ismcast = TRUE;
            hash = 0;
        } else {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
        }

        cur = if_flow_lkup[ifindex].fl_hash[hash];

        while (cur) {
            if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
                (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
                 (cur->flow_info.tid == prio))) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return cur->flowid;
            }
            cur = cur->next;
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
    return FLOWID_INVALID;
} /* dhd_flowid_find */
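
/*
 * Lookup strategy recap: for a generic STA role the TID (prio) itself
 * indexes fl_hash[], one ring per priority towards the AP, TDLS peers being
 * the exception (hashed by destination address). For AP-like roles each
 * unicast peer gets its own ring, found via DHD_FLOWRING_HASHINDEX(da, prio)
 * plus a walk of the collision chain, while all multicast traffic shares
 * hash slot 0.
 */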

/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
                                      uint8 prio, char *sa, char *da)
{
    flow_hash_info_t *fl_hash_node, *cur;
    if_flow_lkup_t *if_flow_lkup;
    int hash;
    uint16 flowid;
    unsigned long flags;

    fl_hash_node =
        (flow_hash_info_t *)MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
    if (fl_hash_node == NULL) {
        DHD_ERROR(
            ("%s: flow_hash_info_t memory allocation failed\n", __FUNCTION__));
        return FLOWID_INVALID;
    }
    memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    ASSERT(dhdp->flowid_allocator != NULL);
    flowid = id16_map_alloc(dhdp->flowid_allocator);
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    if (flowid == FLOWID_INVALID) {
        MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
        DHD_ERROR_RLMT(("%s: cannot get free flowid\n", __FUNCTION__));
        return FLOWID_INVALID;
    }

    fl_hash_node->flowid = flowid;
    fl_hash_node->flow_info.tid = prio;
    fl_hash_node->flow_info.ifindex = ifindex;
    fl_hash_node->next = NULL;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
        /* For STA/GC, non TDLS destinations and WDS destinations get an
         * entry allocated based on prio only
         */
#ifdef WLTDLS
        if (dhdp->peer_tbl.tdls_peer_count && (is_tdls_destination(dhdp, da))) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            if (cur) {
                while (cur->next) {
                    cur = cur->next;
                }
                cur->next = fl_hash_node;
            } else {
                if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
            }
        } else
#endif /* WLTDLS */
            if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
    } else {

        /* For bcast/mcast assign the first slot in the interface */
        hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
        cur = if_flow_lkup[ifindex].fl_hash[hash];
        if (cur) {
            while (cur->next) {
                cur = cur->next;
            }
            cur->next = fl_hash_node;
        } else {
            if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

    if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
        DHD_ERROR(
            ("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
             __FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings, ifindex,
             prio, if_flow_lkup[ifindex].role));
        dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
        dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
        return FLOWID_INVALID;
    }

    return fl_hash_node->flowid;
} /* dhd_flowid_alloc */

/** Get flow ring ID, if not present try to create one */
static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
                                    char *sa, char *da, uint16 *flowid)
{
    uint16 id;
    flow_ring_node_t *flow_ring_node;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    int ret;

    DHD_TRACE(("%s\n", __FUNCTION__));

    if (!dhdp->flow_ring_table) {
        return BCME_ERROR;
    }

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return BCME_BADARG;
    }

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

    if (id == FLOWID_INVALID) {
        bool if_role_multi_client;
        if_flow_lkup_t *if_flow_lkup;
        if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

        if (!if_flow_lkup[ifindex].status) {
            return BCME_ERROR;
        }

        /* check role for multi client case */
        if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

        /* Abort flowring creation if multi client flowrings crossed the
         * threshold
         */
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
        if (if_role_multi_client && (dhdp->multi_client_flow_rings >=
                                     dhdp->max_multi_client_flow_rings)) {
            DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
                            __FUNCTION__, dhdp->multi_client_flow_rings,
                            dhdp->max_multi_client_flow_rings));
            return BCME_ERROR;
        }
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */

        /* Do not create flowring if peer is not associated */
#if defined(PCIE_FULL_DONGLE)
        if (if_role_multi_client && !ETHER_ISMULTI(da) &&
            !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
            DHD_ERROR_RLMT(
                ("%s: Skip send pkt without peer addition\n", __FUNCTION__));
            return BCME_ERROR;
        }
#endif /* PCIE_FULL_DONGLE */

        id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
        if (id == FLOWID_INVALID) {
            DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
                            __FUNCTION__, ifindex,
                            if_flow_lkup[ifindex].status));
            return BCME_ERROR;
        }

        ASSERT(id < dhdp->num_flow_rings);

        /* Only after flowid alloc, increment multi_client_flow_rings */
        if (if_role_multi_client) {
            dhdp->multi_client_flow_rings++;
        }

        /* register this flowid in dhd_pub */
        dhd_add_flowid(dhdp, ifindex, prio, da, id);

        flow_ring_node = (flow_ring_node_t *)&flow_ring_table[id];

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /* Init Flow info */
        memcpy(flow_ring_node->flow_info.sa, sa,
               sizeof(flow_ring_node->flow_info.sa));
        memcpy(flow_ring_node->flow_info.da, da,
               sizeof(flow_ring_node->flow_info.da));
        flow_ring_node->flow_info.tid = prio;
        flow_ring_node->flow_info.ifindex = ifindex;
        flow_ring_node->active = TRUE;
        flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef TX_STATUS_LATENCY_STATS
        flow_ring_node->flow_info.num_tx_status = 0;
        flow_ring_node->flow_info.cum_tx_status_latency = 0;
        flow_ring_node->flow_info.num_tx_pkts = 0;
#endif /* TX_STATUS_LATENCY_STATS */
        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Create and inform device about the new flow */
        if (dhd_bus_flow_ring_create_request(
                dhdp->bus, (void *)flow_ring_node) != BCME_OK) {
            DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
            flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
            flow_ring_node->active = FALSE;
            DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
            DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
            return BCME_ERROR;
        }

        *flowid = id;
        return BCME_OK;
    } else {
        /* the flow id was found in the hash */

        if (id >= dhdp->num_flow_rings) {
            DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
                       __FUNCTION__, id, dhdp->num_flow_rings));
            *flowid = FLOWID_INVALID;
            ASSERT(0);
            return BCME_ERROR;
        }

        flow_ring_node = (flow_ring_node_t *)&flow_ring_table[id];
        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /*
         * If the flow_ring_node is in the Open state or a pending status,
         * we can return the flow id to the caller. If the flow_ring_node is
         * in FLOW_RING_STATUS_CREATE_PENDING, creation is still in progress
         * and the packets should be queued.
         *
         * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
         * FLOW_RING_STATUS_CLOSED, we should return an error.
         * Note that when a flow ring is being deleted we mark it as
         * FLOW_RING_STATUS_DELETE_PENDING. Before the dongle responds and
         * before we mark it as FLOW_RING_STATUS_CLOSED, we could still get
         * tx packets; those packets must be dropped.
         * The decision to return OK should NOT be based on the 'active'
         * variable, because 'active' is set TRUE when a flow_ring_node is
         * allocated and FALSE when the flow ring is removed, and so does not
         * reflect the true state of the flow ring. If IDLE_TX_FLOW_MGMT is
         * defined, two more flowring states must be handled: if the
         * flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
         * is returned and the flowring is resumed again from dhd_bus_txdata;
         * the status FLOW_RING_STATUS_RESUME_PENDING is equivalent to
         * FLOW_RING_STATUS_CREATE_PENDING.
         */
        if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
            flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
            *flowid = FLOWID_INVALID;
            ret = BCME_ERROR;
        } else {
            *flowid = id;
            ret = BCME_OK;
        }

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
        return ret;
    } /* flow id found in the hash */
} /* dhd_flowid_lookup */
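
/*
 * Status handling recap for an existing flowid (see the comment above):
 *
 *   CREATE_PENDING / OPEN        -> return id; packets may be queued
 *   DELETE_PENDING / CLOSED      -> return error; caller drops the packet
 *   SUSPENDED / RESUME_PENDING   -> return id (IDLE_TX_FLOW_MGMT only; the
 *                                   ring is resumed from dhd_bus_txdata)
 */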

int dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashidx = 0;
    bool found = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    if (!dhdp->flow_ring_table) {
        DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
        cur = if_flow_lkup[ifindex].fl_hash[hashidx];
        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                cur = cur->next;
            }

            if (found) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return BCME_OK;
            }
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_ERROR;
}

int dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
                            char *sa, char *da, uint16 *flowid)
{
    return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
}

/**
 * Assign existing or newly created flowid to an 802.3 packet. This flowid is
 * later on used to select the flowring to send the packet to the dongle.
 */
int BCMFASTPATH dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
                                  void *pktbuf)
{
    uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
    struct ether_header *eh = (struct ether_header *)pktdata;
    uint16 flowid = 0;

    ASSERT(ifindex < DHD_MAX_IFS);

    if (ifindex >= DHD_MAX_IFS) {
        return BCME_BADARG;
    }

    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost,
                          (char *)eh->ether_dhost, &flowid) != BCME_OK) {
        return BCME_ERROR;
    }

    DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));

    /* Tag the packet with flowid */
    DHD_PKT_SET_FLOWID(pktbuf, flowid);
    return BCME_OK;
}
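
/*
 * TX path sketch (hypothetical call site): before handing an 802.3 packet
 * to the bus layer, the sender resolves and tags the flowid; the tag is
 * read back later to pick the flow ring towards the dongle:
 *
 *   if (dhd_flowid_update(dhdp, ifidx, pkt_prio, pktbuf) != BCME_OK) {
 *       PKTFREE(dhdp->osh, pktbuf, TRUE); // no usable flow ring: drop
 *   }
 *   // else: hand pktbuf down the bus TX path (dhd_bus_txdata), which
 *   // consumes the flowid tag when selecting the ring.
 */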

void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashix;
    bool found = FALSE;
    flow_hash_info_t *cur, *prev;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;
    bool if_role_multi_client;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

    for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
        cur = if_flow_lkup[ifindex].fl_hash[hashix];
        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            prev = NULL;
            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                prev = cur;
                cur = cur->next;
            }
            if (found) {
                if (!prev) {
                    if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
                } else {
                    prev->next = cur->next;
                }

                /* Decrement multi_client_flow_rings */
                if (if_role_multi_client) {
                    dhdp->multi_client_flow_rings--;
                }

                /* deregister flowid from dhd_pub. */
                dhd_del_flowid(dhdp, ifindex, flowid);

                id16_map_free(dhdp->flowid_allocator, flowid);
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));

                return;
            }
        }
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
    DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
               __FUNCTION__, flowid));
} /* dhd_flowid_free */

/**
 * Delete all flow rings associated with the given interface. Called when e.g.
 * the dongle indicates that a wireless link has gone down.
 */
void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return;
    }

    if (!dhdp->flow_ring_table) {
        return;
    }

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                                             (void *)&flow_ring_table[id]);
        }
    }
}

void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return;
    }

    if (!dhdp->flow_ring_table) {
        return;
    }
    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_flush_request(dhdp->bus,
                                            (void *)&flow_ring_table[id]);
        }
    }
}

/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS
 * functionality.
 */
void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return;
    }

    if (!dhdp->flow_ring_table) {
        return;
    }

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        /*
         * Send the flowring delete request even if the flowring status is
         * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where the
         * DISASSOC_IND event comes ahead of the flowring create response.
         * Otherwise the flowring will not be deleted later, as there will
         * not be any DISASSOC_IND event. With this change, when the create
         * response event comes to DHD, it will change the status to
         * FLOW_RING_STATUS_OPEN, and soon the delete response event will
         * come, upon which DHD will delete the flowring.
         */
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
            ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
             (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
            DHD_ERROR(("%s: deleting flowid %d\n", __FUNCTION__,
                       flow_ring_table[id].flowid));
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                                             (void *)&flow_ring_table[id]);
        }
    }
}

/** Handles interface ADD, CHANGE, DEL indications from the dongle */
void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, uint8 op,
                                    uint8 role)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return;
    }

    DHD_INFO(("%s: ifindex %u op %u role is %u\n", __FUNCTION__, ifindex, op,
              role));
    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
        if_flow_lkup[ifindex].role = role;
        if (role == WLC_E_IF_ROLE_WDS) {
            /**
             * The WDS role does not send a WLC_E_LINK event after the
             * interface is up. So to create flowrings for WDS, set the
             * status to TRUE in WLC_E_IF itself; the same applies when
             * setting the status to FALSE.
             * Fix FW to send WLC_E_LINK for the WDS role as well, so that
             * all interfaces are handled uniformly.
             */
            if_flow_lkup[ifindex].status = TRUE;
            DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d\n",
                      __FUNCTION__, ifindex, role));
        }
    } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
        if_flow_lkup[ifindex].status = FALSE;
        DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d\n",
                  __FUNCTION__, ifindex, role));
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
}

/** Handles a STA 'link' indication from the dongle */
int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex,
                                     uint8 status)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS) {
        return BCME_BADARG;
    }

    DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (status) {
        if_flow_lkup[ifindex].status = TRUE;
    } else {
        if_flow_lkup[ifindex].status = FALSE;
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_OK;
}

/** Update flow priority mapping, called on IOVAR */
int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
{
    uint16 flowid;
    flow_ring_node_t *flow_ring_node;

    if (map > DHD_FLOW_PRIO_LLR_MAP) {
        return BCME_BADOPTION;
    }

    /* Check if we need to change prio map */
    if (map == dhdp->flow_prio_map_type) {
        return BCME_OK;
    }

    /* If any ring is active we cannot change priority mapping for flow rings */
    for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
        flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
        if (flow_ring_node->active) {
            return BCME_EPERM;
        }
    }

    /* Inform firmware about new mapping type */
    if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE)) {
        return BCME_ERROR;
    }

    /* update internal structures */
    dhdp->flow_prio_map_type = map;
    if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP) {
        bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
    } else {
        bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
    }

    dhdp->max_multi_client_flow_rings =
        dhd_get_max_multi_client_flow_rings(dhdp);

    return BCME_OK;
}

/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
    uint8 iovbuf[24];
    int len;
    if (!set) {
        memset(&iovbuf, 0, sizeof(iovbuf));
        len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char *)iovbuf,
                          sizeof(iovbuf));
        if (len == 0) {
            return BCME_BUFTOOSHORT;
        }
        if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE,
                             0) < 0) {
            DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
            return BCME_ERROR;
        }
        *map = iovbuf[0];
        return BCME_OK;
    }
    len = bcm_mkiovar("bus:fl_prio_map", (char *)map, 0x4, (char *)iovbuf,
                      sizeof(iovbuf));
    if (len == 0) {
        return BCME_BUFTOOSHORT;
    }
    if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
        DHD_ERROR(("%s: failed to set fl_prio_map\n", __FUNCTION__));
        return BCME_ERROR;
    }
    return BCME_OK;
}
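
/*
 * Example (sketch): switching the mapping type at runtime via the IOVAR
 * path wrapped by dhd_update_flow_prio_map(); this only succeeds while no
 * flow ring is active:
 *
 *   uint8 map = DHD_FLOW_PRIO_TID_MAP;
 *   if (dhd_update_flow_prio_map(dhdp, map) == BCME_OK) {
 *       // dhdp->flow_prio_map now holds prio2tid
 *   }
 */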