• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
3  *
4  * Flow rings are transmit traffic (=propagating towards antenna) related entities
5  *
6  *
7  * Copyright (C) 1999-2017, Broadcom Corporation
8  *
9  *      Unless you and Broadcom execute a separate written software license
10  * agreement governing use of this software, this software is licensed to you
11  * under the terms of the GNU General Public License version 2 (the "GPL"),
12  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13  * following added to such license:
14  *
15  *      As a special exception, the copyright holders of this software give you
16  * permission to link this software with independent modules, and to copy and
17  * distribute the resulting executable under terms of your choice, provided that
18  * you also meet, for each linked independent module, the terms and conditions of
19  * the license of that module.  An independent module is a module which is not
20  * derived from this software.  The special exception does not apply to any
21  * modifications of the software.
22  *
23  *      Notwithstanding the above, under no circumstances may you combine this
24  * software in any way with any other Broadcom software provided under a license
25  * other than the GPL, without Broadcom's express prior written consent.
26  *
27  *
28  * <<Broadcom-WL-IPTag/Open:>>
29  *
30  * $Id: dhd_flowring.c 710862 2017-07-14 07:43:59Z $
31  */
32 
33 
34 #include <typedefs.h>
35 #include <bcmutils.h>
36 #include <bcmendian.h>
37 #include <bcmdevs.h>
38 
39 #include <ethernet.h>
40 #include <bcmevent.h>
41 #include <dngl_stats.h>
42 
43 #include <dhd.h>
44 
45 #include <dhd_flowring.h>
46 #include <dhd_bus.h>
47 #include <dhd_proto.h>
48 #include <dhd_dbg.h>
49 #include <802.1d.h>
50 #include <pcie_core.h>
51 #include <bcmmsgbuf.h>
52 #include <dhd_pcie.h>
53 
/* Forward declarations; definitions appear later in this file. */
static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);

static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
                                     uint8 prio, char *sa, char *da);

static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
                                      uint8 prio, char *sa, char *da);

static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
                                uint8 prio, char *sa, char *da, uint16 *flowid);
int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);

/* Backup-queue packets are singly linked through the packet's link pointer. */
#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))

/* 802.1D priority -> access category map (indexed by priority 0..7).
 * NOTE(review): with EAPOL_PKT_PRIO/DHD_LOSSLESS_ROAMING, priority 7 maps to
 * the out-of-range value 7 — presumably a dedicated slot for EAPOL frames
 * during roaming; confirm against the consumers of prio2ac.
 */
#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
#else
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
/* 802.1D priority -> TID: identity mapping. */
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
75 
76 /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
77 static INLINE int
dhd_flow_queue_throttle(flow_queue_t * queue)78 dhd_flow_queue_throttle(flow_queue_t *queue)
79 {
80     return DHD_FLOW_QUEUE_FULL(queue);
81 }
82 
83 int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t * queue,void * pkt)84 dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
85 {
86     return BCME_NORESOURCE;
87 }
88 
89 /** Returns flow ring given a flowid */
90 flow_ring_node_t *
dhd_flow_ring_node(dhd_pub_t * dhdp,uint16 flowid)91 dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
92 {
93     flow_ring_node_t * flow_ring_node;
94 
95     ASSERT(dhdp != (dhd_pub_t*)NULL);
96     ASSERT(flowid < dhdp->num_flow_rings);
97 
98     flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
99 
100     ASSERT(flow_ring_node->flowid == flowid);
101     return flow_ring_node;
102 }
103 
104 /** Returns 'backup' queue given a flowid */
105 flow_queue_t *
dhd_flow_queue(dhd_pub_t * dhdp,uint16 flowid)106 dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
107 {
108     flow_ring_node_t * flow_ring_node;
109 
110     flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
111     return &flow_ring_node->queue;
112 }
113 
114 /* Flow ring's queue management functions */
115 
116 /** Reinitialize a flow ring's queue. */
117 void
dhd_flow_queue_reinit(dhd_pub_t * dhdp,flow_queue_t * queue,int max)118 dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
119 {
120     ASSERT((queue != NULL) && (max > 0));
121 
122     queue->head = queue->tail = NULL;
123     queue->len = 0;
124 
125     /* Set queue's threshold and queue's parent cummulative length counter */
126     ASSERT(max > 1);
127     DHD_FLOW_QUEUE_SET_MAX(queue, max);
128     DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
129     DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
130     DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);
131 
132     queue->failures = 0U;
133     queue->cb = &dhd_flow_queue_overflow;
134 }
135 
136 /** Initialize a flow ring's queue, called on driver initialization. */
137 void
dhd_flow_queue_init(dhd_pub_t * dhdp,flow_queue_t * queue,int max)138 dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
139 {
140     ASSERT((queue != NULL) && (max > 0));
141 
142     dll_init(&queue->list);
143     dhd_flow_queue_reinit(dhdp, queue, max);
144 }
145 
146 /** Register an enqueue overflow callback handler */
147 void
dhd_flow_queue_register(flow_queue_t * queue,flow_queue_cb_t cb)148 dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
149 {
150     ASSERT(queue != NULL);
151     queue->cb = cb;
152 }
153 
154 /**
155  * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
156  * to the flow ring itself.
157  */
158 int BCMFASTPATH
dhd_flow_queue_enqueue(dhd_pub_t * dhdp,flow_queue_t * queue,void * pkt)159 dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
160 {
161     int ret = BCME_OK;
162 
163     ASSERT(queue != NULL);
164 
165     if (dhd_flow_queue_throttle(queue)) {
166         queue->failures++;
167         ret = (*queue->cb)(queue, pkt);
168         goto done;
169     }
170 
171     if (queue->head) {
172         FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
173     } else {
174         queue->head = pkt;
175     }
176 
177     FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
178 
179     queue->tail = pkt; /* at tail */
180 
181     queue->len++;
182     /* increment parent's cummulative length */
183     DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
184     /* increment grandparent's cummulative length */
185     DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
186 
187 done:
188     return ret;
189 }
190 
191 /** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
192 void * BCMFASTPATH
dhd_flow_queue_dequeue(dhd_pub_t * dhdp,flow_queue_t * queue)193 dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
194 {
195     void * pkt;
196 
197     ASSERT(queue != NULL);
198 
199     pkt = queue->head; /* from head */
200 
201     if (pkt == NULL) {
202         ASSERT((queue->len == 0) && (queue->tail == NULL));
203         goto done;
204     }
205 
206     queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
207     if (queue->head == NULL)
208         queue->tail = NULL;
209 
210     queue->len--;
211     /* decrement parent's cummulative length */
212     DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
213     /* decrement grandparent's cummulative length */
214     DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
215 
216     FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */
217 
218 done:
219     return pkt;
220 }
221 
222 /** Reinsert a dequeued 802.3 packet back at the head */
223 void BCMFASTPATH
dhd_flow_queue_reinsert(dhd_pub_t * dhdp,flow_queue_t * queue,void * pkt)224 dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
225 {
226     if (queue->head == NULL) {
227         queue->tail = pkt;
228     }
229 
230     FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
231     queue->head = pkt;
232     queue->len++;
233     /* increment parent's cummulative length */
234     DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
235     /* increment grandparent's cummulative length */
236     DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
237 }
238 
239 /** Fetch the backup queue for a flowring, and assign flow control thresholds */
240 void
dhd_flow_ring_config_thresholds(dhd_pub_t * dhdp,uint16 flowid,int queue_budget,int cumm_threshold,void * cumm_ctr,int l2cumm_threshold,void * l2cumm_ctr)241 dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
242                      int queue_budget, int cumm_threshold, void *cumm_ctr,
243                      int l2cumm_threshold, void *l2cumm_ctr)
244 {
245     flow_queue_t * queue;
246 
247     ASSERT(dhdp != (dhd_pub_t*)NULL);
248     ASSERT(queue_budget > 1);
249     ASSERT(cumm_threshold > 1);
250     ASSERT(cumm_ctr != (void*)NULL);
251     ASSERT(l2cumm_threshold > 1);
252     ASSERT(l2cumm_ctr != (void*)NULL);
253 
254     queue = dhd_flow_queue(dhdp, flowid);
255 
256     DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
257 
258     /* Set the queue's parent threshold and cummulative counter */
259     DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
260     DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
261 
262     /* Set the queue's grandparent threshold and cummulative counter */
263     DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
264     DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
265 }
266 
267 /** Initializes data structures of multiple flow rings */
/**
 * Allocates and initializes the flowid allocator, the flow ring table and the
 * per-interface lookup hash, then publishes them into @dhdp under flowid lock.
 *
 * @param dhdp            dhd public context (provides osh, bus).
 * @param num_flow_rings  total ring count, including reserved common rings.
 * @return BCME_OK on success, BCME_NOMEM on any allocation/lock failure
 *         (all partially-created resources are torn down on failure).
 */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
    uint32 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz = 0;
    void * flowid_allocator;
    flow_ring_table_t *flow_ring_table = NULL;
    if_flow_lkup_t *if_flow_lkup = NULL;
    void *lock = NULL;
    void *list_lock = NULL;
    unsigned long flags;

    DHD_INFO(("%s\n", __FUNCTION__));

    /* Construct a 16bit flowid allocator; common (control) rings are excluded
     * from the allocatable range, and ids below FLOWID_RESERVED are reserved.
     */
    flowid_allocator = id16_map_init(dhdp->osh,
                           num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
    if (flowid_allocator == NULL) {
        DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
        return BCME_NOMEM;
    }

    /* Allocate a flow ring table, comprising of requested number of rings */
    flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
    flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
    if (flow_ring_table == NULL) {
        DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize flow ring table state */
    DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
    DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
    /* NOTE(review): MALLOCZ above already returns zeroed memory, so this
     * bzero is redundant (but harmless).
     */
    bzero((uchar *)flow_ring_table, flow_ring_table_sz);
    for (idx = 0; idx < num_flow_rings; idx++) {
        flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
        flow_ring_table[idx].flowid = (uint16)idx;
        flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
        flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
        if (flow_ring_table[idx].lock == NULL) {
            /* Locks created so far are released on the fail path below. */
            DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
            goto fail;
        }

        dll_init(&flow_ring_table[idx].list);

        /* Initialize the per flow ring backup queue */
        dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
                            FLOW_RING_QUEUE_THRESHOLD);
    }

    /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
    if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
    if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
        DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
    if (if_flow_lkup == NULL) {
        DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize per interface hash table */
    for (idx = 0; idx < DHD_MAX_IFS; idx++) {
        int hash_ix;
        if_flow_lkup[idx].status = 0;
        if_flow_lkup[idx].role = 0;
        for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
            if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
    }

    /* Spinlock protecting the flowid allocator and lookup hash. */
    lock = dhd_os_spin_lock_init(dhdp->osh);
    if (lock == NULL)
        goto fail;

    /* Spinlock protecting the active flowring list. */
    list_lock = dhd_os_spin_lock_init(dhdp->osh);
    if (list_lock == NULL)
        goto lock_fail;

    dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
    bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
#ifdef DHD_LOSSLESS_ROAMING
    dhdp->dequeue_prec_map = ALLPRIO;
#endif
    /* Now populate into dhd pub */
    DHD_FLOWID_LOCK(lock, flags);
    dhdp->num_flow_rings = num_flow_rings;
    dhdp->flowid_allocator = (void *)flowid_allocator;
    dhdp->flow_ring_table = (void *)flow_ring_table;
    dhdp->if_flow_lkup = (void *)if_flow_lkup;
    dhdp->flowid_lock = lock;
    dhdp->flow_rings_inited = TRUE;
    dhdp->flowring_list_lock = list_lock;
    DHD_FLOWID_UNLOCK(lock, flags);

    DHD_INFO(("%s done\n", __FUNCTION__));
    return BCME_OK;

lock_fail:
    /* deinit the spinlock */
    dhd_os_spin_lock_deinit(dhdp->osh, lock);

fail:
    /* Destruct the per interface flow lkup table */
    if (if_flow_lkup != NULL) {
        DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
    }
    if (flow_ring_table != NULL) {
        for (idx = 0; idx < num_flow_rings; idx++) {
            if (flow_ring_table[idx].lock != NULL)
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
        }
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }
    id16_map_fini(dhdp->osh, flowid_allocator);

    return BCME_NOMEM;
}
387 
388 /** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
    uint16 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    void *lock;

    DHD_INFO(("dhd_flow_rings_deinit\n"));

    /* Nothing to do if init never completed (or already torn down). */
    if (!(dhdp->flow_rings_inited)) {
        DHD_ERROR(("dhd_flow_rings not initialized!\n"));
        return;
    }

    if (dhdp->flow_ring_table != NULL) {
        ASSERT(dhdp->num_flow_rings > 0);

        /* Detach the table from dhdp under lock so no new lookups see it,
         * then clean the rings outside the lock.
         */
        DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
        flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
        dhdp->flow_ring_table = NULL;
        DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
        for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
            if (flow_ring_table[idx].active) {
                dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
            }
            ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

            /* Deinit flow ring queue locks before destroying flow ring table */
            if (flow_ring_table[idx].lock != NULL) {
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
            }
            flow_ring_table[idx].lock = NULL;
        }

        /* Destruct the flow ring table.
         * NOTE(review): size uses sizeof(flow_ring_table_t) while init
         * allocated with sizeof(flow_ring_node_t) — assumes the two typedefs
         * are the same type; verify in dhd_flowring.h.
         */
        flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

    /* Destruct the per interface flow lkup table */
    if (dhdp->if_flow_lkup != NULL) {
        if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
        bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
        DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
        dhdp->if_flow_lkup = NULL;
    }

    /* Destruct the flowid allocator */
    if (dhdp->flowid_allocator != NULL)
        dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);

    dhdp->num_flow_rings = 0U;
    bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    /* Detach the flowid lock from dhdp before releasing/destroying it, so
     * the unlock below operates on the saved handle.
     */
    lock = dhdp->flowid_lock;
    dhdp->flowid_lock = NULL;

    if (lock) {
        DHD_FLOWID_UNLOCK(lock, flags);
        dhd_os_spin_lock_deinit(dhdp->osh, lock);
    }

    dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
    dhdp->flowring_list_lock = NULL;

    ASSERT(dhdp->if_flow_lkup == NULL);
    ASSERT(dhdp->flowid_allocator == NULL);
    ASSERT(dhdp->flow_ring_table == NULL);
    dhdp->flow_rings_inited = FALSE;
}
463 
464 /** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
465 uint8
dhd_flow_rings_ifindex2role(dhd_pub_t * dhdp,uint8 ifindex)466 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
467 {
468     if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
469     ASSERT(if_flow_lkup);
470     return if_flow_lkup[ifindex].role;
471 }
472 
473 #ifdef WLTDLS
is_tdls_destination(dhd_pub_t * dhdp,uint8 * da)474 bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
475 {
476     unsigned long flags;
477     tdls_peer_node_t *cur = NULL;
478 
479     DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
480     cur = dhdp->peer_tbl.node;
481 
482     while (cur != NULL) {
483         if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
484             DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
485             return TRUE;
486         }
487         cur = cur->next;
488     }
489     DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
490     return FALSE;
491 }
492 #endif /* WLTDLS */
493 
494 /** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
/**
 * Maps ifindex + prio + da to an existing flowid via the per-interface hash.
 * Returns the flowid, or FLOWID_INVALID when no entry exists.
 * Holds the flowid lock for the entire lookup; every return path unlocks.
 * NOTE(review): @sa is unused here — lookup keys on da/prio only.
 */
static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    int hash;
    bool ismcast = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    ASSERT(if_flow_lkup);

    if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
            (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
#ifdef WLTDLS
        /* TDLS unicast peers get dedicated per-DA rings, hashed on DA+prio. */
        if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
            is_tdls_destination(dhdp, da)) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            while (cur != NULL) {
                if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
                    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                    return cur->flowid;
                }
                cur = cur->next;
            }
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return FLOWID_INVALID;
        }
#endif /* WLTDLS */
        /* For STA non TDLS dest and WDS dest flow ring id is mapped based on prio only */
        cur = if_flow_lkup[ifindex].fl_hash[prio];
        if (cur) {
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return cur->flowid;
        }
    } else {
        /* AP-like roles: all multicast shares bucket 0; unicast hashes on DA+prio. */
        if (ETHER_ISMULTI(da)) {
            ismcast = TRUE;
            hash = 0;
        } else {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
        }

        cur = if_flow_lkup[ifindex].fl_hash[hash];

        while (cur) {
            /* Multicast matches any multicast entry; unicast needs DA+TID match. */
            if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
                (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
                (cur->flow_info.tid == prio))) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return cur->flowid;
            }
            cur = cur->next;
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
    return FLOWID_INVALID;
} /* dhd_flowid_find */
558 
559 /** Create unique Flow ID, called when a flow ring is created. */
/**
 * Allocates a fresh flowid from the id16 allocator and inserts a matching
 * entry into the per-interface lookup hash. Returns the new flowid or
 * FLOWID_INVALID on allocation failure.
 * NOTE(review): @sa is unused here; the hash entry records da/tid/ifindex only.
 * NOTE(review): the flowid lock is dropped between id allocation and hash
 * insertion — verify callers serialize allocations for the same flow.
 */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    flow_hash_info_t *fl_hash_node, *cur;
    if_flow_lkup_t *if_flow_lkup;
    int hash;
    uint16 flowid;
    unsigned long flags;

    /* Allocate the hash node first so failure leaves the allocator untouched. */
    fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
    if (fl_hash_node == NULL) {
        DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
        return FLOWID_INVALID;
    }
    memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    ASSERT(dhdp->flowid_allocator != NULL);
    flowid = id16_map_alloc(dhdp->flowid_allocator);
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    if (flowid == FLOWID_INVALID) {
        MFREE(dhdp->osh, fl_hash_node,  sizeof(flow_hash_info_t));
        DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
        return FLOWID_INVALID;
    }

    fl_hash_node->flowid = flowid;
    fl_hash_node->flow_info.tid = prio;
    fl_hash_node->flow_info.ifindex = ifindex;
    fl_hash_node->next = NULL;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
            (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
        /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
#ifdef WLTDLS
        /* TDLS peers: append to the DA+prio hash chain instead. */
        if (dhdp->peer_tbl.tdls_peer_count &&
            (is_tdls_destination(dhdp, da))) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            if (cur) {
                while (cur->next) {
                    cur = cur->next;
                }
                cur->next = fl_hash_node;
            } else {
                if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
            }
        } else
#endif /* WLTDLS */
            if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
    } else {
        /* For bcast/mcast assign first slot in the interface */
        hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
        cur = if_flow_lkup[ifindex].fl_hash[hash];
        if (cur) {
            /* Append at the end of the collision chain. */
            while (cur->next) {
                cur = cur->next;
            }
            cur->next = fl_hash_node;
        } else
            if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

    return fl_hash_node->flowid;
} /* dhd_flowid_alloc */
632 
633 /** Get flow ring ID, if not present try to create one */
/**
 * Resolves ifindex/prio/sa/da to a flowid, creating the flow ring (and
 * informing the dongle) when none exists yet. Writes the result through
 * @flowid and returns BCME_OK, or BCME_ERROR when lookup/creation fails or
 * the ring is being torn down.
 */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
                  uint8 prio, char *sa, char *da, uint16 *flowid)
{
    uint16 id;
    flow_ring_node_t *flow_ring_node;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    int ret;
    bool is_sta_assoc;

    DHD_INFO(("%s\n", __FUNCTION__));
    if (!dhdp->flow_ring_table) {
        return BCME_ERROR;
    }

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

    if (id == FLOWID_INVALID) {
        /* No existing flow: validate the interface and create a new ring. */
        if_flow_lkup_t *if_flow_lkup;
        if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

        if (!if_flow_lkup[ifindex].status)
            return BCME_ERROR;
        BCM_REFERENCE(is_sta_assoc);
#if defined(PCIE_FULL_DONGLE)
        /* AP/P2P-GO may only open unicast rings toward associated stations. */
        is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
        DHD_ERROR(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
            ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
            is_sta_assoc));
        if (!ETHER_ISMULTI(da) &&
            ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
            (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
            (!is_sta_assoc))
            return BCME_ERROR;
#endif /* PCIE_FULL_DONGLE */

        id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
        if (id == FLOWID_INVALID) {
            DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
                       __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
            return BCME_ERROR;
        }

        ASSERT(id < dhdp->num_flow_rings);

        /* register this flowid in dhd_pub */
        dhd_add_flowid(dhdp, ifindex, prio, da, id);

        flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /* Init Flow info */
        memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
        memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
        flow_ring_node->flow_info.tid = prio;
        flow_ring_node->flow_info.ifindex = ifindex;
        flow_ring_node->active = TRUE;
        flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef DEVICE_TX_STUCK_DETECT
        flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
        flow_ring_node->stuck_count = 0;
#endif /* DEVICE_TX_STUCK_DETECT */

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Create and inform device about the new flow */
        if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
                != BCME_OK) {
            DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
            return BCME_ERROR;
        }

        *flowid = id;
        return BCME_OK;
    } else {
        /* if the Flow id was found in the hash */
        ASSERT(id < dhdp->num_flow_rings);

        flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /*
         * If the flow_ring_node is in Open State or Status pending state then
         * we can return the Flow id to the caller.If the flow_ring_node is in
         * FLOW_RING_STATUS_PENDING this means the creation is in progress and
         * hence the packets should be queued.
         *
         * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or
         * FLOW_RING_STATUS_CLOSED, then we should return Error.
         * Note that if the flowring is being deleted we would mark it as
         * FLOW_RING_STATUS_DELETE_PENDING.  Now before Dongle could respond and
         * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets.
         * We should drop the packets in that case.
         * The decision to return OK should NOT be based on 'active' variable, because
         * active is made TRUE when a flow_ring_node gets allocated and is made
         * FALSE when the flow ring gets removed and does not reflect the True state
         * of the Flow ring.
         * In case if IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
         * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
         * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
         * The status FLOW_RING_STATUS_RESUME_PENDING, is equivalent to
         * FLOW_RING_STATUS_CREATE_PENDING.
         */
        if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
            flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
            *flowid = FLOWID_INVALID;
            ret = BCME_ERROR;
        } else {
            *flowid = id;
            ret = BCME_OK;
        }

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
        return ret;
    } /* Flow Id found in the hash */
} /* dhd_flowid_lookup */
756 
757 /**
758  * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
759  * select the flowring to send the packet to the dongle.
760  */
761 int BCMFASTPATH
dhd_flowid_update(dhd_pub_t * dhdp,uint8 ifindex,uint8 prio,void * pktbuf)762 dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
763 {
764     uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
765     struct ether_header *eh = (struct ether_header *)pktdata;
766     uint16 flowid;
767 
768     ASSERT(ifindex < DHD_MAX_IFS);
769 
770     if (ifindex >= DHD_MAX_IFS) {
771         return BCME_BADARG;
772     }
773 
774     if (!dhdp->flowid_allocator) {
775         DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
776         return BCME_ERROR;
777     }
778 
779     if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
780         &flowid) != BCME_OK) {
781         return BCME_ERROR;
782     }
783 
784     DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
785 
786     /* Tag the packet with flowid */
787     DHD_PKT_SET_FLOWID(pktbuf, flowid);
788     return BCME_OK;
789 }
790 
791 void
dhd_flowid_free(dhd_pub_t * dhdp,uint8 ifindex,uint16 flowid)792 dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
793 {
794     int hashix;
795     bool found = FALSE;
796     flow_hash_info_t *cur, *prev;
797     if_flow_lkup_t *if_flow_lkup;
798     unsigned long flags;
799 
800     DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
801     if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
802 
803     for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
804         cur = if_flow_lkup[ifindex].fl_hash[hashix];
805 
806         if (cur) {
807             if (cur->flowid == flowid) {
808                 found = TRUE;
809             }
810 
811             prev = NULL;
812             while (!found && cur) {
813                 if (cur->flowid == flowid) {
814                     found = TRUE;
815                     break;
816                 }
817                 prev = cur;
818                 cur = cur->next;
819             }
820             if (found) {
821                 if (!prev) {
822                     if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
823                 } else {
824                     prev->next = cur->next;
825                 }
826 
827                 /* deregister flowid from dhd_pub. */
828                 dhd_del_flowid(dhdp, ifindex, flowid);
829 
830                 id16_map_free(dhdp->flowid_allocator, flowid);
831                 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
832                 MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
833 
834                 return;
835             }
836         }
837     }
838 
839     DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
840     DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
841                __FUNCTION__, flowid));
842 } /* dhd_flowid_free */
843 
844 /**
845  * Delete all Flow rings associated with the given interface. Is called when eg the dongle
846  * indicates that a wireless link has gone down.
847  */
848 void
dhd_flow_rings_delete(dhd_pub_t * dhdp,uint8 ifindex)849 dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
850 {
851     uint32 id;
852     flow_ring_table_t *flow_ring_table;
853 
854     DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
855 
856     ASSERT(ifindex < DHD_MAX_IFS);
857     if (ifindex >= DHD_MAX_IFS)
858         return;
859 
860     if (!dhdp->flow_ring_table)
861         return;
862 
863     flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
864     for (id = 0; id < dhdp->num_flow_rings; id++) {
865         if (flow_ring_table[id].active &&
866             (flow_ring_table[id].flow_info.ifindex == ifindex) &&
867             (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
868             dhd_bus_flow_ring_delete_request(dhdp->bus,
869                                              (void *) &flow_ring_table[id]);
870         }
871     }
872 }
873 
874 void
dhd_flow_rings_flush(dhd_pub_t * dhdp,uint8 ifindex)875 dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
876 {
877     uint32 id;
878     flow_ring_table_t *flow_ring_table;
879 
880     DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
881 
882     ASSERT(ifindex < DHD_MAX_IFS);
883     if (ifindex >= DHD_MAX_IFS)
884         return;
885 
886     if (!dhdp->flow_ring_table)
887         return;
888     flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
889 
890     for (id = 0; id <= dhdp->num_flow_rings; id++) {
891         if (flow_ring_table[id].active &&
892             (flow_ring_table[id].flow_info.ifindex == ifindex) &&
893             (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
894             dhd_bus_flow_ring_flush_request(dhdp->bus,
895                                              (void *) &flow_ring_table[id]);
896         }
897     }
898 }
899 
900 
901 /** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
902 void
dhd_flow_rings_delete_for_peer(dhd_pub_t * dhdp,uint8 ifindex,char * addr)903 dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
904 {
905     uint32 id;
906     flow_ring_table_t *flow_ring_table;
907 
908     DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
909 
910     ASSERT(ifindex < DHD_MAX_IFS);
911     if (ifindex >= DHD_MAX_IFS)
912         return;
913 
914     if (!dhdp->flow_ring_table)
915         return;
916 
917     flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
918     for (id = 0; id < dhdp->num_flow_rings; id++) {
919         if (flow_ring_table[id].active &&
920             (flow_ring_table[id].flow_info.ifindex == ifindex) &&
921             (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
922             (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
923             DHD_ERROR(("%s: deleting flowid %d\n",
924                 __FUNCTION__, flow_ring_table[id].flowid));
925             dhd_bus_flow_ring_delete_request(dhdp->bus,
926                 (void *) &flow_ring_table[id]);
927         }
928     }
929 }
930 
931 /** Handles interface ADD, CHANGE, DEL indications from the dongle */
932 void
dhd_update_interface_flow_info(dhd_pub_t * dhdp,uint8 ifindex,uint8 op,uint8 role)933 dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
934                                uint8 op, uint8 role)
935 {
936     if_flow_lkup_t *if_flow_lkup;
937     unsigned long flags;
938 
939     ASSERT(ifindex < DHD_MAX_IFS);
940     if (ifindex >= DHD_MAX_IFS)
941         return;
942 
943     DHD_ERROR(("%s: ifindex %u op %u role is %u \n",
944               __FUNCTION__, ifindex, op, role));
945     if (!dhdp->flowid_allocator) {
946         DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
947         return;
948     }
949 
950     DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
951     if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
952 
953     if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
954         if_flow_lkup[ifindex].role = role;
955 
956         if (role == WLC_E_IF_ROLE_WDS) {
957             /**
958              * WDS role does not send WLC_E_LINK event after interface is up.
959              * So to create flowrings for WDS, make status as TRUE in WLC_E_IF itself.
960              * same is true while making the status as FALSE.
961              * TODO: Fix FW to send WLC_E_LINK for WDS role aswell. So that all the
962              * interfaces are handled uniformly.
963              */
964             if_flow_lkup[ifindex].status = TRUE;
965             DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
966                       __FUNCTION__, ifindex, role));
967         }
968     } else    if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
969         if_flow_lkup[ifindex].status = FALSE;
970         DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
971                   __FUNCTION__, ifindex, role));
972     }
973     DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
974 }
975 
976 /** Handles a STA 'link' indication from the dongle */
977 int
dhd_update_interface_link_status(dhd_pub_t * dhdp,uint8 ifindex,uint8 status)978 dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
979 {
980     if_flow_lkup_t *if_flow_lkup;
981     unsigned long flags;
982 
983     ASSERT(ifindex < DHD_MAX_IFS);
984     if (ifindex >= DHD_MAX_IFS)
985         return BCME_BADARG;
986 
987     DHD_ERROR(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
988 
989     DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
990     if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
991 
992     if (status) {
993         if_flow_lkup[ifindex].status = TRUE;
994     } else {
995         if_flow_lkup[ifindex].status = FALSE;
996     }
997 
998     DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
999 
1000     return BCME_OK;
1001 }
1002 
1003 /** Update flow priority mapping, called on IOVAR */
dhd_update_flow_prio_map(dhd_pub_t * dhdp,uint8 map)1004 int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
1005 {
1006     uint16 flowid;
1007     flow_ring_node_t *flow_ring_node;
1008 
1009     if (map > DHD_FLOW_PRIO_LLR_MAP)
1010         return BCME_BADOPTION;
1011 
1012     /* Check if we need to change prio map */
1013     if (map == dhdp->flow_prio_map_type)
1014         return BCME_OK;
1015 
1016     /* If any ring is active we cannot change priority mapping for flow rings */
1017     for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
1018         flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
1019         if (flow_ring_node->active)
1020             return BCME_EPERM;
1021     }
1022 
1023     /* Inform firmware about new mapping type */
1024     if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
1025         return BCME_ERROR;
1026 
1027     /* update internal structures */
1028     dhdp->flow_prio_map_type = map;
1029     if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
1030         bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
1031     else
1032         bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
1033 
1034     return BCME_OK;
1035 }
1036 
1037 /** Inform firmware on updated flow priority mapping, called on IOVAR */
dhd_flow_prio_map(dhd_pub_t * dhd,uint8 * map,bool set)1038 int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
1039 {
1040     uint8 iovbuf[24] = {0};
1041     if (!set) {
1042         bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
1043         if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
1044             DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
1045             return BCME_ERROR;
1046         }
1047         *map = iovbuf[0];
1048         return BCME_OK;
1049     }
1050     bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
1051     if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
1052         DHD_ERROR(("%s: failed to set fl_prio_map \n",
1053             __FUNCTION__));
1054         return BCME_ERROR;
1055     }
1056     return BCME_OK;
1057 }
1058