/*
 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
 *
 * Flow rings are transmit-traffic (i.e., propagating towards the antenna) related entities.
 *
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

/** XXX Twiki: [PCIeFullDongleArchitecture] */

#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <dngl_stats.h>

#include <dhd.h>

#include <dhd_flowring.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <pcie_core.h>
#include <bcmmsgbuf.h>
#include <dhd_pcie.h>
#include <dhd_config.h>

static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);

static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da);

static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da);

static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid);
int dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);

#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))

#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
#define DHD_FLOWRING_INFO DHD_TRACE
#else
#define DHD_FLOWRING_INFO DHD_INFO
#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */

const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

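/*
 * Note (editorial): prio2ac[] collapses the eight 802.1D priorities onto the
 * four WMM access categories (0=BE, 1=BK, 2=VI, 3=VO, following the usual
 * 802.1D-to-AC convention), so e.g. priorities 1 and 2 share AC_BK and hence
 * one flowring. prio2tid[] is the identity map, giving one flowring per TID.
 * Which table is in effect is selected via dhdp->flow_prio_map_type (see
 * dhd_update_flow_prio_map() below).
 */
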
/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
static INLINE int
dhd_flow_queue_throttle(flow_queue_t *queue)
{
#if defined(BCM_ROUTER_DHD)
    /* Two tests
     * 1) Test whether the overall level 2 (grandparent) cumulative threshold is crossed.
     * 2) Or test whether the queue's budget and the overall cumulative threshold are crossed.
     */
    void *gp_clen_ptr = DHD_FLOW_QUEUE_L2CLEN_PTR(queue);
    void *parent_clen_ptr = DHD_FLOW_QUEUE_CLEN_PTR(queue);
    int gp_cumm_threshold = DHD_FLOW_QUEUE_L2THRESHOLD(queue);
    int cumm_threshold = DHD_FLOW_QUEUE_THRESHOLD(queue);

    int ret = ((DHD_CUMM_CTR_READ(gp_clen_ptr) > gp_cumm_threshold) ||
        ((DHD_FLOW_QUEUE_OVFL(queue, DHD_FLOW_QUEUE_MAX(queue))) &&
        (DHD_CUMM_CTR_READ(parent_clen_ptr) > cumm_threshold)));
    return ret;
#else
    return DHD_FLOW_QUEUE_FULL(queue);
#endif /* BCM_ROUTER_DHD */
}

int
BCMFASTPATH(dhd_flow_queue_overflow)(flow_queue_t *queue, void *pkt)
{
    return BCME_NORESOURCE;
}

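/*
 * Note (editorial): dhd_flow_queue_overflow() above is the default overflow
 * callback installed by dhd_flow_queue_reinit(). It unconditionally reports
 * BCME_NORESOURCE, so a throttled enqueue simply fails and the caller decides
 * what to do with the packet (typically drop it). A different per-queue
 * policy can be installed via dhd_flow_queue_register().
 */
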
/** Returns flow ring given a flowid */
flow_ring_node_t *
dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t *flow_ring_node;

    ASSERT(dhdp != (dhd_pub_t *)NULL);
    ASSERT(flowid <= dhdp->max_tx_flowid);
    if (flowid > dhdp->max_tx_flowid) {
        return NULL;
    }

    flow_ring_node = &(((flow_ring_node_t *)(dhdp->flow_ring_table))[flowid]);

    ASSERT(flow_ring_node->flowid == flowid);
    return flow_ring_node;
}

/** Returns 'backup' queue given a flowid */
flow_queue_t *
dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t *flow_ring_node = NULL;

    flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
    if (flow_ring_node)
        return &flow_ring_node->queue;
    else
        return NULL;
}

/* Flow ring's queue management functions */

/** Reinitialize a flow ring's queue. */
void
dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    queue->head = queue->tail = NULL;
    queue->len = 0;

    /* Set queue's threshold and queue's parent cumulative length counter */
    ASSERT(max > 1);
    DHD_FLOW_QUEUE_SET_MAX(queue, max);
    DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
    DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
    DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);

    queue->failures = 0U;
    queue->cb = &dhd_flow_queue_overflow;
}

/** Initialize a flow ring's queue, called on driver initialization. */
void
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    dll_init(&queue->list);
    dhd_flow_queue_reinit(dhdp, queue, max);
}

/** Register an enqueue overflow callback handler */
void
dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
    ASSERT(queue != NULL);
    queue->cb = cb;
}

/**
 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there it will later be
 * transferred to the flow ring itself.
 */
int
BCMFASTPATH(dhd_flow_queue_enqueue)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
    int ret = BCME_OK;

    ASSERT(queue != NULL);

    if (dhd_flow_queue_throttle(queue)) {
        queue->failures++;
        ret = (*queue->cb)(queue, pkt);
        goto done;
    }

    if (queue->head) {
        FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
    } else {
        queue->head = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);

    queue->tail = pkt; /* at tail */

    queue->len++;
    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

done:
    return ret;
}

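/*
 * Illustrative sketch (editorial, not part of the driver): a typical producer
 * holds the flowring lock around the enqueue and checks the return code, e.g.
 *
 *     DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
 *     if (dhd_flow_queue_enqueue(dhdp, &flow_ring_node->queue, pkt) != BCME_OK) {
 *         // throttled: the overflow callback decided the fate of 'pkt'
 *         // (the default callback returns BCME_NORESOURCE, i.e. drop)
 *     }
 *     DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
 *
 * The queue itself is a singly linked FIFO threaded through PKTLINK(), so
 * enqueue and dequeue are O(1) and no allocation happens on this path.
 */
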
/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
void *
BCMFASTPATH(dhd_flow_queue_dequeue)(dhd_pub_t *dhdp, flow_queue_t *queue)
{
    void *pkt;

    ASSERT(queue != NULL);

    pkt = queue->head; /* from head */

    if (pkt == NULL) {
        ASSERT((queue->len == 0) && (queue->tail == NULL));
        goto done;
    }

    queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
    if (queue->head == NULL)
        queue->tail = NULL;

    queue->len--;
    /* decrement parent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* decrement grandparent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */

done:
    return pkt;
}

/** Reinsert a dequeued 802.3 packet back at the head */
void
BCMFASTPATH(dhd_flow_queue_reinsert)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
    if (queue->head == NULL) {
        queue->tail = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
    queue->head = pkt;
    queue->len++;
    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
}

/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void
dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
    int queue_budget, int cumm_threshold, void *cumm_ctr,
    int l2cumm_threshold, void *l2cumm_ctr)
{
    flow_queue_t *queue = NULL;

    ASSERT(dhdp != (dhd_pub_t *)NULL);
    ASSERT(queue_budget > 1);
    ASSERT(cumm_threshold > 1);
    ASSERT(cumm_ctr != (void *)NULL);
    ASSERT(l2cumm_threshold > 1);
    ASSERT(l2cumm_ctr != (void *)NULL);

    queue = dhd_flow_queue(dhdp, flowid);
    if (queue) {
        DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

        /* Set the queue's parent threshold and cumulative counter */
        DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
        DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);

        /* Set the queue's grandparent threshold and cumulative counter */
        DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
        DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
    }
}

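/*
 * Note (editorial): flow control here is hierarchical. Each backup queue has
 * its own budget (DHD_FLOW_QUEUE_MAX), a parent cumulative counter/threshold,
 * and a grandparent ("L2") counter/threshold. By default both counters point
 * at dhdp->cumm_ctr and dhdp->l2cumm_ctr (see dhd_flow_queue_reinit());
 * callers may re-point them via this function to finer-grained counters.
 * dhd_flow_queue_throttle() then trips when the grandparent threshold is
 * crossed, or when both the queue's own budget and the parent threshold are
 * exceeded (router builds).
 */
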
/*
 * This function returns the total number of flowrings that can be created for an INFRA STA.
 * For the prio2ac mapping it returns 4: prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }
 * For the prio2tid mapping it returns 8: prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }
 */
uint8
dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
{
    uint8 prio_count = 0;
    int i;
    /* Pick all elements one by one */
    for (i = 0; i < NUMPRIO; i++)
    {
        /* Check if the picked element is already counted */
        int j;
        for (j = 0; j < i; j++) {
            if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
                break;
            }
        }
        /* If not counted earlier, then count it */
        if (i == j) {
            prio_count++;
        }
    }

    return prio_count;
}

uint8
dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
{
    uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
    uint8 total_tx_flow_rings = (uint8)dhd_get_max_flow_rings(dhdp);
    uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
    return max_multi_client_flow_rings;
}

int
dhd_flowid_map_init(dhd_pub_t *dhdp, uint16 max_tx_flow_rings)
{
#if defined(DHD_HTPUT_TUNABLES)
    uint16 max_normal_tx_flow_rings = max_tx_flow_rings - HTPUT_TOTAL_FLOW_RINGS;
#else
    uint16 max_normal_tx_flow_rings = max_tx_flow_rings;
#endif /* DHD_HTPUT_TUNABLES */

    /* Construct a normal flowid allocator covering FLOWID_RESERVED to
     * (FLOWID_RESERVED + max_normal_tx_flow_rings - 1)
     */
    dhdp->flowid_allocator = id16_map_init(dhdp->osh, max_normal_tx_flow_rings,
        FLOWID_RESERVED);
    if (dhdp->flowid_allocator == NULL) {
        DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
        return BCME_NOMEM;
    }

#if defined(DHD_HTPUT_TUNABLES)
    if (HTPUT_TOTAL_FLOW_RINGS > 0) {
        dhdp->htput_flow_ring_start = max_normal_tx_flow_rings + FLOWID_RESERVED;
        /* Construct a htput flowid allocator covering htput_flow_ring_start to
         * (htput_flow_ring_start + HTPUT_TOTAL_FLOW_RINGS - 1)
         */
        dhdp->htput_flowid_allocator = id16_map_init(dhdp->osh, HTPUT_TOTAL_FLOW_RINGS,
            dhdp->htput_flow_ring_start);
        if (dhdp->htput_flowid_allocator == NULL) {
            DHD_ERROR(("%s: htput flowid allocator init failure\n", __FUNCTION__));
            return BCME_NOMEM;
        }
        dhdp->htput_client_flow_rings = 0u;
    }
#endif /* DHD_HTPUT_TUNABLES */

    return BCME_OK;
}

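/*
 * Flowid space layout implied by the allocators above (editorial sketch):
 *
 *   [0 .. FLOWID_RESERVED-1]                        reserved (common rings)
 *   [FLOWID_RESERVED .. FLOWID_RESERVED+N-1]        normal tx flowrings
 *   [FLOWID_RESERVED+N .. FLOWID_RESERVED+N+H-1]    htput flowrings (optional)
 *
 * where N == max_normal_tx_flow_rings and H == HTPUT_TOTAL_FLOW_RINGS;
 * dhdp->max_tx_flowid (set in dhd_flow_rings_init()) caps the whole range.
 */
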
void
dhd_flowid_map_deinit(dhd_pub_t *dhdp)
{
    if (dhdp->flowid_allocator) {
        dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
    }
    ASSERT(dhdp->flowid_allocator == NULL);

#if defined(DHD_HTPUT_TUNABLES)
    if (dhdp->htput_flowid_allocator) {
        dhdp->htput_flowid_allocator = id16_map_fini(dhdp->osh,
            dhdp->htput_flowid_allocator);
        ASSERT(dhdp->htput_flowid_allocator == NULL);
    }
    dhdp->htput_client_flow_rings = 0u;
#endif /* DHD_HTPUT_TUNABLES */
    return;
}

/** Initializes data structures of multiple flow rings
 * num_h2d_rings - max_h2d_rings including static and dynamic rings
 */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_h2d_rings)
{
    uint32 idx;
    uint32 flow_ring_table_sz = 0;
    uint32 if_flow_lkup_sz = 0;
    flow_ring_table_t *flow_ring_table = NULL;
    if_flow_lkup_t *if_flow_lkup = NULL;
    void *lock = NULL;
    void *list_lock = NULL;
    unsigned long flags;
    uint16 max_tx_flow_rings;

    DHD_INFO(("%s\n", __FUNCTION__));

    /*
     * The 16-bit flowid map is allocated only for the actual number of Tx flowrings,
     * excluding the common rings.
     * All other flowring data structures are allocated for all num_h2d_rings.
     */
    max_tx_flow_rings = dhd_get_max_flow_rings(dhdp);
    if (dhd_flowid_map_init(dhdp, max_tx_flow_rings) != BCME_OK) {
        DHD_ERROR(("%s: dhd_flowid_map_init failure\n", __FUNCTION__));
        goto fail;
    }

    /* No Tx flow id should exceed max_tx_flowid */
    dhdp->max_tx_flowid = max_tx_flow_rings + FLOWID_RESERVED - 1;

    /* Allocate a flow ring table, comprising the requested number of rings */
    flow_ring_table_sz = (num_h2d_rings * sizeof(flow_ring_node_t));
    flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
    if (flow_ring_table == NULL) {
        DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize flow ring table state */
    DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
    DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
    bzero((uchar *)flow_ring_table, flow_ring_table_sz);
    for (idx = 0; idx < num_h2d_rings; idx++) {
        flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
        flow_ring_table[idx].flowid = (uint16)idx;
        flow_ring_table[idx].lock = osl_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
        flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
        if (flow_ring_table[idx].lock == NULL) {
            DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
            goto fail;
        }

        dll_init(&flow_ring_table[idx].list);

        /* Initialize the per flow ring backup queue */
        dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
            dhdp->conf->flow_ring_queue_threshold);
    }

    /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
    if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
    if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
        DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
    if (if_flow_lkup == NULL) {
        DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize per interface hash table */
    for (idx = 0; idx < DHD_MAX_IFS; idx++) {
        int hash_ix;
        if_flow_lkup[idx].status = 0;
        if_flow_lkup[idx].role = 0;
        for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
            if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
    }

    lock = osl_spin_lock_init(dhdp->osh);
    if (lock == NULL)
        goto fail;

    list_lock = osl_spin_lock_init(dhdp->osh);
    if (list_lock == NULL)
        goto lock_fail;

    dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
    bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
    dhdp->multi_client_flow_rings = 0U;

#ifdef DHD_LOSSLESS_ROAMING
    dhdp->dequeue_prec_map = ALLPRIO;
#endif
    /* Now populate into dhd pub */
    DHD_FLOWID_LOCK(lock, flags);
    dhdp->num_h2d_rings = num_h2d_rings;
    dhdp->flow_ring_table = (void *)flow_ring_table;
    dhdp->if_flow_lkup = (void *)if_flow_lkup;
    dhdp->flowid_lock = lock;
    dhdp->flow_rings_inited = TRUE;
    dhdp->flowring_list_lock = list_lock;
    DHD_FLOWID_UNLOCK(lock, flags);

    DHD_INFO(("%s done\n", __FUNCTION__));
    return BCME_OK;

lock_fail:
    /* deinit the spinlock */
    osl_spin_lock_deinit(dhdp->osh, lock);

fail:
    /* Destruct the per interface flow lkup table */
    if (if_flow_lkup != NULL) {
        DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
    }
    if (flow_ring_table != NULL) {
        for (idx = 0; idx < num_h2d_rings; idx++) {
            if (flow_ring_table[idx].lock != NULL)
                osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
        }
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }
    dhd_flowid_map_deinit(dhdp);

    return BCME_NOMEM;
}

/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
    uint16 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    void *lock;

    DHD_INFO(("dhd_flow_rings_deinit\n"));

    if (!(dhdp->flow_rings_inited)) {
        DHD_ERROR(("dhd_flow_rings not initialized!\n"));
        return;
    }

    if (dhdp->flow_ring_table != NULL) {

        ASSERT(dhdp->num_h2d_rings > 0);

        DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
        flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
        dhdp->flow_ring_table = NULL;
        DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
        for (idx = 0; idx < dhdp->num_h2d_rings; idx++) {
            if (flow_ring_table[idx].active) {
                dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
            }
            ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

            /* Deinit flow ring queue locks before destroying flow ring table */
            if (flow_ring_table[idx].lock != NULL) {
                osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
            }
            flow_ring_table[idx].lock = NULL;
        }

        /* Destruct the flow ring table */
        flow_ring_table_sz = dhdp->num_h2d_rings * sizeof(flow_ring_table_t);
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

    /* Destruct the per interface flow lkup table */
    if (dhdp->if_flow_lkup != NULL) {
        if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
        bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
        DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
        dhdp->if_flow_lkup = NULL;
    }

    /* Destruct the flowid allocator */
    dhd_flowid_map_deinit(dhdp);

    dhdp->num_h2d_rings = 0U;
    bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = 0U;
    dhdp->multi_client_flow_rings = 0U;

    lock = dhdp->flowid_lock;
    dhdp->flowid_lock = NULL;

    if (lock) {
        DHD_FLOWID_UNLOCK(lock, flags);
        osl_spin_lock_deinit(dhdp->osh, lock);
    }

    osl_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
    dhdp->flowring_list_lock = NULL;

    ASSERT(dhdp->if_flow_lkup == NULL);
    ASSERT(dhdp->flow_ring_table == NULL);
    dhdp->flow_rings_inited = FALSE;
}

/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
uint8
dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
    if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    ASSERT(if_flow_lkup);
    return if_flow_lkup[ifindex].role;
}

#ifdef WLTDLS
bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
{
    unsigned long flags;
    tdls_peer_node_t *cur = NULL;

    DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
    /* Check only if a tdls peer is added */
    if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da))) {
        cur = dhdp->peer_tbl.node;

        while (cur != NULL) {
            if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
                DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
                return TRUE;
            }
            cur = cur->next;
        }
    }
    DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
    return FALSE;
}
#endif /* WLTDLS */

/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    int hash;
    bool ismcast = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return FLOWID_INVALID;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    ASSERT(if_flow_lkup);

    if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
#ifdef WLTDLS
        if (is_tdls_destination(dhdp, da)) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            while (cur != NULL) {
                if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
                    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                    return cur->flowid;
                }
                cur = cur->next;
            }
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return FLOWID_INVALID;
        }
#endif /* WLTDLS */
        /* For STA non-TDLS dest and WDS dest, the flow ring id is mapped based on prio only */
        cur = if_flow_lkup[ifindex].fl_hash[prio];
        if (cur) {
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return cur->flowid;
        }
    } else {

        if (ETHER_ISMULTI(da)) {
            ismcast = TRUE;
            hash = 0;
        } else {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
        }

        cur = if_flow_lkup[ifindex].fl_hash[hash];

        while (cur) {
            if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
                (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
                (cur->flow_info.tid == prio))) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return cur->flowid;
            }
            cur = cur->next;
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

#ifdef DHD_EFI
    DHD_TRACE(("%s: cannot find flowid\n", __FUNCTION__));
#else
    DHD_FLOWRING_INFO(("%s: cannot find flowid\n", __FUNCTION__));
#endif
    return FLOWID_INVALID;
} /* dhd_flowid_find */

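/*
 * Note on the lookup scheme above (editorial): for generic STA roles there is
 * one flowring per priority, so fl_hash[] is indexed directly by prio (TDLS
 * peers excepted; those hash on {da, prio}). For AP-like roles, unicast
 * traffic hashes on {da, prio} via DHD_FLOWRING_HASHINDEX() with chaining,
 * while all bcast/mcast traffic shares hash slot 0.
 */
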
static uint16
dhd_flowid_map_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *da)
{
    uint16 flowid = FLOWID_INVALID;
    ASSERT(dhdp->flowid_allocator != NULL);

#if defined(DHD_HTPUT_TUNABLES)
    if (dhdp->htput_flowid_allocator) {
        if (prio == HTPUT_FLOW_RING_PRIO) {
            if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
                /* For the STA case, only one flowring per PRIO is created,
                 * so there is no need for a HTPUT counter variable for STA.
                 * If a HTPUT flowring is already allocated for the given
                 * HTPUT_PRIO, this function will not even get called, as
                 * dhd_flowid_find will take care of assigning the same
                 * flowid for those HTPUT_PRIO packets.
                 */
                flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
            } else if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex) && !ETHER_ISMULTI(da)) {
                /* Use HTPUT flowrings for only HTPUT_NUM_CLIENT_FLOW_RINGS */
                if (dhdp->htput_client_flow_rings < HTPUT_NUM_CLIENT_FLOW_RINGS) {
                    flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
                    /* increment htput client counter */
                    if (flowid != FLOWID_INVALID) {
                        dhdp->htput_client_flow_rings++;
                    }
                }
            }
        }
    }
#endif /* DHD_HTPUT_TUNABLES */

    BCM_REFERENCE(flowid);

    /*
     * For the HTPUT case, if the high throughput flowrings are already allocated
     * for the given role, control falls through to here and a normal flowid is
     * allocated instead.
     */
    if (flowid == FLOWID_INVALID) {
        flowid = id16_map_alloc(dhdp->flowid_allocator);
    }

    return flowid;
}

/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    flow_hash_info_t *fl_hash_node, *cur;
    if_flow_lkup_t *if_flow_lkup;
    int hash;
    uint16 flowid;
    unsigned long flags;

    fl_hash_node = (flow_hash_info_t *)MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
    if (fl_hash_node == NULL) {
        DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
        return FLOWID_INVALID;
    }
    memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    flowid = dhd_flowid_map_alloc(dhdp, ifindex, prio, da);
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    if (flowid == FLOWID_INVALID) {
        MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
        DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
        return FLOWID_INVALID;
    }

    fl_hash_node->flowid = flowid;
    fl_hash_node->flow_info.tid = prio;
    fl_hash_node->flow_info.ifindex = ifindex;
    fl_hash_node->next = NULL;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
        /* For STA/GC non-TDLS dest and WDS dest we allocate an entry based on prio only */
#ifdef WLTDLS
        if (is_tdls_destination(dhdp, da)) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            if (cur) {
                while (cur->next) {
                    cur = cur->next;
                }
                cur->next = fl_hash_node;
            } else {
                if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
            }
        } else
#endif /* WLTDLS */
            if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
    } else {

        /* For bcast/mcast assign the first slot in the interface */
        hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
        cur = if_flow_lkup[ifindex].fl_hash[hash];
        if (cur) {
            while (cur->next) {
                cur = cur->next;
            }
            cur->next = fl_hash_node;
        } else
            if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_FLOWRING_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

    if (fl_hash_node->flowid > dhdp->max_tx_flowid) {
        DHD_ERROR(("%s: flowid=%d max_tx_flowid=%d ifindex=%d prio=%d role=%d\n",
            __FUNCTION__, fl_hash_node->flowid, dhdp->max_tx_flowid,
            ifindex, prio, if_flow_lkup[ifindex].role));
        dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
        dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
        return FLOWID_INVALID;
    }

    return fl_hash_node->flowid;
} /* dhd_flowid_alloc */

/** Get flow ring ID; if not present, try to create one */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid)
{
    uint16 id;
    flow_ring_node_t *flow_ring_node;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    int ret;

    DHD_TRACE(("%s\n", __FUNCTION__));

    if (!dhdp->flow_ring_table) {
        return BCME_ERROR;
    }

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return BCME_BADARG;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

    if (id == FLOWID_INVALID) {
        bool if_role_multi_client;
        if_flow_lkup_t *if_flow_lkup;
        if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

        if (!if_flow_lkup[ifindex].status)
            return BCME_ERROR;

        /* check role for the multi client case */
        if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

        /* Abort flowring creation if multi client flowrings crossed the threshold */
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
        if (if_role_multi_client &&
            (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
            DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
                __FUNCTION__, dhdp->multi_client_flow_rings,
                dhdp->max_multi_client_flow_rings));
            return BCME_ERROR;
        }
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */

        /* Do not create a flowring if the peer is not associated */
#if (defined(linux) || defined(LINUX)) && defined(PCIE_FULL_DONGLE)
        if (if_role_multi_client && !ETHER_ISMULTI(da) &&
            !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
            DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
            return BCME_ERROR;
        }
#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */

        id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
        if (id == FLOWID_INVALID) {
            DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
                __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
            return BCME_ERROR;
        }

        ASSERT(id <= dhdp->max_tx_flowid);

        /* Only after flowid alloc, increment multi_client_flow_rings */
        if (if_role_multi_client) {
            dhdp->multi_client_flow_rings++;
        }

        /* register this flowid in dhd_pub */
        dhd_add_flowid(dhdp, ifindex, prio, da, id);

        flow_ring_node = (flow_ring_node_t *)&flow_ring_table[id];

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /* Init Flow info */
        memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
        memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
        flow_ring_node->flow_info.tid = prio;
        flow_ring_node->flow_info.ifindex = ifindex;
        flow_ring_node->active = TRUE;
        flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef DEVICE_TX_STUCK_DETECT
        flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
        flow_ring_node->stuck_count = 0;
#endif /* DEVICE_TX_STUCK_DETECT */
#ifdef TX_STATUS_LATENCY_STATS
        flow_ring_node->flow_info.num_tx_status = 0;
        flow_ring_node->flow_info.cum_tx_status_latency = 0;
        flow_ring_node->flow_info.num_tx_pkts = 0;
#endif /* TX_STATUS_LATENCY_STATS */
#ifdef BCMDBG
        bzero(&flow_ring_node->flow_info.tx_status[0],
            sizeof(uint32) * DHD_MAX_TX_STATUS_MSGS);
#endif
        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Create and inform device about the new flow */
        if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
            != BCME_OK) {
            DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
            flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
            flow_ring_node->active = FALSE;
            DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
            DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
            return BCME_ERROR;
        }

        *flowid = id;
        return BCME_OK;
    } else {
        /* the flow id was found in the hash */

        if (id > dhdp->max_tx_flowid) {
            DHD_ERROR(("%s: Invalid flow id : %u, max_tx_flowid : %u\n",
                __FUNCTION__, id, dhdp->max_tx_flowid));
            *flowid = FLOWID_INVALID;
            ASSERT(0);
            return BCME_ERROR;
        }

        flow_ring_node = (flow_ring_node_t *)&flow_ring_table[id];
        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /*
         * If the flow_ring_node is in the Open or Create-Pending state, we can
         * return the flow id to the caller. FLOW_RING_STATUS_CREATE_PENDING means
         * the creation is still in progress and hence the packets should be queued.
         *
         * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
         * FLOW_RING_STATUS_CLOSED, we should return an error.
         * Note that when a flowring is being deleted we mark it as
         * FLOW_RING_STATUS_DELETE_PENDING; before the dongle responds, and
         * before we mark it as FLOW_RING_STATUS_CLOSED, we could get tx packets.
         * We should drop the packets in that case.
         * The decision to return OK should NOT be based on the 'active' variable,
         * because 'active' is set TRUE when a flow_ring_node gets allocated and
         * set FALSE when the flow ring gets removed, and so does not reflect the
         * true state of the flow ring.
         * In case IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
         * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the
         * flowid is to be returned and from dhd_bus_txdata the flowring will be
         * resumed again. The status FLOW_RING_STATUS_RESUME_PENDING is equivalent to
         * FLOW_RING_STATUS_CREATE_PENDING.
         */
        if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
            flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
            *flowid = FLOWID_INVALID;
            ret = BCME_ERROR;
        } else {
            *flowid = id;
            ret = BCME_OK;
        }

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
        return ret;
    } /* flow id found in the hash */
} /* dhd_flowid_lookup */

int
dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashidx = 0;
    bool found = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    if (!dhdp->flow_ring_table) {
        DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
        cur = if_flow_lkup[ifindex].fl_hash[hashidx];
        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                cur = cur->next;
            }

            if (found) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return BCME_OK;
            }
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_ERROR;
}

int
dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid)
{
    return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
}

/**
 * Assign an existing or newly created flowid to an 802.3 packet. This flowid is later used to
 * select the flowring to send the packet to the dongle.
 */
int
BCMFASTPATH(dhd_flowid_update)(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
{
    uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
    struct ether_header *eh = (struct ether_header *)pktdata;
    uint16 flowid = 0;

    ASSERT(ifindex < DHD_MAX_IFS);

    if (ifindex >= DHD_MAX_IFS) {
        return BCME_BADARG;
    }

    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
        &flowid) != BCME_OK) {
        return BCME_ERROR;
    }

    DHD_FLOWRING_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));

    /* Tag the packet with flowid */
    DHD_PKT_SET_FLOWID(pktbuf, flowid);
    return BCME_OK;
}

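/*
 * Illustrative sketch (editorial, not part of the driver): on the tx path the
 * caller tags the packet before handing it to the bus layer, roughly:
 *
 *     if (dhd_flowid_update(dhdp, ifidx, prio, pktbuf) != BCME_OK) {
 *         PKTFREE(dhdp->osh, pktbuf, TRUE);    // no flowring available; drop
 *     } else {
 *         // hand off to the bus tx path, which reads the tag back
 *         // (exact dhd_bus_txdata() signature varies across DHD versions)
 *     }
 *
 * The tag written by DHD_PKT_SET_FLOWID() is what the bus layer later reads
 * to select the flowring for this packet.
 */
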
static void
dhd_flowid_map_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
#if defined(DHD_HTPUT_TUNABLES)
    if (dhdp->htput_flowid_allocator) {
        if (DHD_IS_FLOWID_HTPUT(dhdp, flowid)) {
            id16_map_free(dhdp->htput_flowid_allocator, flowid);
            /* decrement htput client counter */
            if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex)) {
                dhdp->htput_client_flow_rings--;
            }
            return;
        }
    }
#endif /* DHD_HTPUT_TUNABLES */

    id16_map_free(dhdp->flowid_allocator, flowid);

    return;
}

void
dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashix;
    bool found = FALSE;
    flow_hash_info_t *cur, *prev;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;
    bool if_role_multi_client;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

    for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {

        cur = if_flow_lkup[ifindex].fl_hash[hashix];

        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            prev = NULL;
            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                prev = cur;
                cur = cur->next;
            }
            if (found) {
                if (!prev) {
                    if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
                } else {
                    prev->next = cur->next;
                }

                /* Decrement multi_client_flow_rings */
                if (if_role_multi_client) {
                    dhdp->multi_client_flow_rings--;
                }

                /* deregister flowid from dhd_pub. */
                dhd_del_flowid(dhdp, ifindex, flowid);

                dhd_flowid_map_free(dhdp, ifindex, flowid);
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));

                return;
            }
        }
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
    DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
        __FUNCTION__, flowid));
} /* dhd_flowid_free */

/**
 * Delete all flow rings associated with the given interface. Called when, for example, the
 * dongle indicates that a wireless link has gone down.
 */
void
dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_h2d_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                (void *)&flow_ring_table[id]);
        }
    }
}

void
dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;
    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    for (id = 0; id < dhdp->num_h2d_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_flush_request(dhdp->bus,
                (void *)&flow_ring_table[id]);
        }
    }
}

/** Delete flow ring(s) for given peer address. */
void
dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_h2d_rings; id++) {
        /*
         * Send the flowring delete request even if the flowring status is
         * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where the DISASSOC_IND
         * event comes ahead of the flowring create response.
         * Otherwise the flowring will not be deleted later, as there will not be any
         * DISASSOC_IND event. With this change, when the create response event comes to
         * DHD, it will change the status to FLOW_RING_STATUS_OPEN and soon the delete
         * response event will come, upon which DHD will delete the flowring.
         */
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
            ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
            (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
            DHD_ERROR(("%s: deleting flowid %d\n",
                __FUNCTION__, flow_ring_table[id].flowid));
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                (void *)&flow_ring_table[id]);
        }
    }
}

/** Handles interface ADD, CHANGE, DEL indications from the dongle */
void
dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 op, uint8 role)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    DHD_INFO(("%s: ifindex %u op %u role is %u \n",
        __FUNCTION__, ifindex, op, role));
    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {

        if_flow_lkup[ifindex].role = role;

        if (role == WLC_E_IF_ROLE_WDS) {
            /**
             * The WDS role does not send a WLC_E_LINK event after the interface is up.
             * So to create flowrings for WDS, set the status to TRUE in WLC_E_IF itself.
             * The same applies when setting the status to FALSE.
             * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that all
             * interfaces are handled uniformly.
             */
            if_flow_lkup[ifindex].status = TRUE;
            DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
                __FUNCTION__, ifindex, role));
        }
    } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
        if_flow_lkup[ifindex].status = FALSE;
        DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
            __FUNCTION__, ifindex, role));
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
}

/** Handles a STA 'link' indication from the dongle */
int
dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return BCME_BADARG;

    DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (status) {
        if_flow_lkup[ifindex].status = TRUE;
    } else {
        if_flow_lkup[ifindex].status = FALSE;
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_OK;
}

/** Update flow priority mapping, called on IOVAR */
int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
{
    uint16 flowid;
    flow_ring_node_t *flow_ring_node;

    if (map > DHD_FLOW_PRIO_LLR_MAP)
        return BCME_BADOPTION;

    /* Check if we need to change prio map */
    if (map == dhdp->flow_prio_map_type)
        return BCME_OK;

    /* If any ring is active we cannot change priority mapping for flow rings */
    for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
        flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
        if (flow_ring_node->active)
            return BCME_EPERM;
    }

    /* Inform firmware about new mapping type */
    if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
        return BCME_ERROR;

    /* update internal structures */
    dhdp->flow_prio_map_type = map;
    if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
        bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
    else
        bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);

    return BCME_OK;
}

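/*
 * Note (editorial): the map type can only be changed while no flowring is
 * active, because already-created rings were keyed off the old mapping. A
 * typical sequence is: quiesce (or never start) tx traffic, issue the IOVAR
 * that lands here with DHD_FLOW_PRIO_TID_MAP, then resume; subsequent
 * dhd_flowid_lookup() calls will then key rings per TID instead of per AC.
 */
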
/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
    uint8 iovbuf[WLC_IOCTL_SMLEN];
    int len;
    uint32 val;
    if (!set) {
        bzero(&iovbuf, sizeof(iovbuf));
        len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char *)iovbuf, sizeof(iovbuf));
        if (len == 0) {
            return BCME_BUFTOOSHORT;
        }
        if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
            DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
            return BCME_ERROR;
        }
        *map = iovbuf[0];
        return BCME_OK;
    }
    val = (uint32)map[0];
    len = bcm_mkiovar("bus:fl_prio_map", (char *)&val, sizeof(val),
        (char *)iovbuf, sizeof(iovbuf));
    if (len == 0) {
        return BCME_BUFTOOSHORT;
    }
    if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
        DHD_ERROR(("%s: failed to set fl_prio_map \n",
            __FUNCTION__));
        return BCME_ERROR;
    }
    return BCME_OK;
}

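/*
 * Sketch of the iovar exchange above (editorial): bcm_mkiovar() packs the
 * NUL-terminated name "bus:fl_prio_map", optionally followed by the 4-byte
 * value, into iovbuf, and dhd_wl_ioctl_cmd() ships it as WLC_SET_VAR. For the
 * GET case the name alone is sent and the firmware overwrites iovbuf with the
 * value, which is why only iovbuf[0] is read back.
 */
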
uint32
dhd_active_tx_flowring_bkpq_len(dhd_pub_t *dhd)
{
    unsigned long list_lock_flags;
    dll_t *item, *prev;
    flow_ring_node_t *flow_ring_node;
    dhd_bus_t *bus = dhd->bus;
    uint32 active_tx_flowring_qlen = 0;

    DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);

    for (item = dll_tail_p(&bus->flowring_active_list);
        !dll_end(&bus->flowring_active_list, item); item = prev) {

        prev = dll_prev_p(item);

        flow_ring_node = dhd_constlist_to_flowring(item);
        if (flow_ring_node->active) {
            DHD_INFO(("%s :%d\n", __FUNCTION__, flow_ring_node->queue.len));
            active_tx_flowring_qlen += flow_ring_node->queue.len;
        }
    }
    DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
    return active_tx_flowring_qlen;
}

#ifdef DHD_AWDL
/**
 * Handle/intercept the awdl peer op IOVAR fired by the user.
 * buf = NULL means delete all peers on the awdl interface.
 */
void
dhd_awdl_peer_op(dhd_pub_t *dhdp, uint8 ifindex, void *buf, uint32 buflen)
{
    awdl_peer_op_t *peer = (awdl_peer_op_t *)buf;
    DHD_TRACE(("%s\n", __FUNCTION__));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;
    if (!buf) {
        /* Delete all peers on the awdl interface */
        if_flow_lkup_t *if_flow_lkup;
        if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
        if (if_flow_lkup[ifindex].role != WLC_E_IF_ROLE_AWDL) {
            DHD_ERROR(("%s: Interface %d is not an awdl interface\n",
                __FUNCTION__, ifindex));
            return;
        }
        dhd_flow_rings_delete(dhdp, ifindex);
        return;
    }
    /* Parse awdl_peer_op info now */
    if (buflen < sizeof(awdl_peer_op_t)) {
        DHD_ERROR(("%s: cannot handle awdl_peer_op add/del\n", __FUNCTION__));
        return;
    }
    /**
     * Only flowring deletion is handled here.
     * Flowring addition is taken care of in dhd_flowid_lookup.
     */
    if (peer->opcode == AWDL_PEER_OP_DEL) {
        dhd_del_sta(dhdp, ifindex, &peer->addr.octet[0]);
        dhd_flow_rings_delete_for_peer(dhdp, ifindex, (char *)&peer->addr.octet[0]);
    } else if (peer->opcode == AWDL_PEER_OP_ADD) {
        dhd_findadd_sta(dhdp, ifindex, &peer->addr.octet[0]);
    }
    return;
}
#endif /* DHD_AWDL */
