• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright (c) 2014 Broadcom Corporation
2  *
3  * Permission to use, copy, modify, and/or distribute this software for any
4  * purpose with or without fee is hereby granted, provided that the above
5  * copyright notice and this permission notice appear in all copies.
6  *
7  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14  */
15 
16 
17 #include <linux/types.h>
18 #include <linux/netdevice.h>
19 #include <linux/etherdevice.h>
20 #include <brcmu_utils.h>
21 
22 #include "dhd.h"
23 #include "dhd_dbg.h"
24 #include "dhd_bus.h"
25 #include "proto.h"
26 #include "flowring.h"
27 #include "msgbuf.h"
28 
29 
30 #define BRCMF_FLOWRING_HIGH		1024
31 #define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
32 #define BRCMF_FLOWRING_INVALID_IFIDX	0xff
33 
34 #define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
35 #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
36 
37 static const u8 ALLZEROMAC[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
38 static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
39 
/* Map an 802.1D user priority (0-7, used as the index) to a TX fifo.
 * The grouping follows the usual WMM access-category ordering:
 * prio 1,2 -> fifo 0 (BK), prio 0,3 -> fifo 1 (BE),
 * prio 4,5 -> fifo 2 (VI), prio 6,7 -> fifo 3 (VO).
 * NOTE(review): AC naming inferred from the standard WMM mapping —
 * confirm against firmware fifo numbering.
 */
static const u8 brcmf_flowring_prio2fifo[] = {
	1,	/* prio 0 */
	0,	/* prio 1 */
	0,	/* prio 2 */
	1,	/* prio 3 */
	2,	/* prio 4 */
	2,	/* prio 5 */
	3,	/* prio 6 */
	3	/* prio 7 */
};
50 
51 
52 static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring * flow,u8 mac[ETH_ALEN])53 brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
54 {
55 	struct brcmf_flowring_tdls_entry *search;
56 
57 	search = flow->tdls_entry;
58 
59 	while (search) {
60 		if (memcmp(search->mac, mac, ETH_ALEN) == 0)
61 			return true;
62 		search = search->next;
63 	}
64 
65 	return false;
66 }
67 
68 
/* brcmf_flowring_lookup() - find the flowid for a destination/priority pair.
 * @flow: flowring state.
 * @da: destination MAC address of the packet.
 * @prio: 802.1D priority (0-7), mapped to a fifo via prio2fifo.
 * @ifidx: interface index.
 *
 * Return: flowid of the matching ring, or BRCMF_FLOWRING_INVALID_ID when
 * no hash entry matches.
 */
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			  u8 prio, u8 ifidx)
{
	struct brcmf_flowring_hash *hash;
	u8 hash_idx;
	u32 i;
	bool found;
	bool sta;
	u8 fifo;
	u8 *mac;

	fifo = brcmf_flowring_prio2fifo[prio];
	/* ADDR_INDIRECT: traffic is relayed (STA mode), so rings are keyed
	 * only by fifo/ifidx instead of by destination MAC.
	 */
	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
	mac = da;
	if ((!sta) && (is_multicast_ether_addr(da))) {
		/* Direct mode: all multicast shares one ring keyed on
		 * ff:ff:ff:ff:ff:ff, forced to fifo 0.
		 */
		mac = (u8 *)ALLFFMAC;
		fifo = 0;
	}
	if ((sta) && (flow->tdls_active) &&
	    (brcmf_flowring_is_tdls_mac(flow, da))) {
		/* TDLS peer has a direct link: hash per-DA as in AP mode. */
		sta = false;
	}
	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
	found = false;
	hash = flow->hash;
	/* Linear probe over the whole table. hash_idx is u8, so index
	 * wraparound relies on u8 arithmetic — assumes
	 * BRCMF_FLOWRING_HASHSIZE == 256; TODO(review) confirm in flowring.h.
	 */
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		/* In STA mode the MAC is not part of the key. */
		if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
		    (hash[hash_idx].fifo == fifo) &&
		    (hash[hash_idx].ifidx == ifidx)) {
			found = true;
			break;
		}
		hash_idx++;
	}
	if (found)
		return hash[hash_idx].flowid;

	return BRCMF_FLOWRING_INVALID_ID;
}
109 
110 
/* brcmf_flowring_create() - allocate a new flowring for da/prio/ifidx.
 * @flow: flowring state.
 * @da: destination MAC address of the traffic.
 * @prio: 802.1D priority (0-7).
 * @ifidx: interface index.
 *
 * Claims a free hash slot (keyed the same way as brcmf_flowring_lookup())
 * and a free ring index, allocates the ring in CLOSED state and returns
 * its flowid, or BRCMF_FLOWRING_INVALID_ID when the hash table is full.
 *
 * NOTE(review): the -ENOMEM returns below push a negative errno through a
 * u32 return type; a caller that only compares against
 * BRCMF_FLOWRING_INVALID_ID (the failure value used by lookup) will treat
 * (u32)-ENOMEM as a valid flowid. Verify callers, or unify the failure
 * value.
 */
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			  u8 prio, u8 ifidx)
{
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;
	u8 hash_idx;
	u32 i;
	bool found;
	u8 fifo;
	bool sta;
	u8 *mac;

	/* Key derivation mirrors brcmf_flowring_lookup(). */
	fifo = brcmf_flowring_prio2fifo[prio];
	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
	mac = da;
	if ((!sta) && (is_multicast_ether_addr(da))) {
		mac = (u8 *)ALLFFMAC;
		fifo = 0;
	}
	if ((sta) && (flow->tdls_active) &&
	    (brcmf_flowring_is_tdls_mac(flow, da))) {
		sta = false;
	}
	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
	found = false;
	hash = flow->hash;
	/* Probe for an unused slot (invalid ifidx AND all-zero MAC). */
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
		    (memcmp(hash[hash_idx].mac, ALLZEROMAC, ETH_ALEN) == 0)) {
			found = true;
			break;
		}
		hash_idx++;
	}
	if (found) {
		/* Find the first free ring index. */
		for (i = 0; i < flow->nrofrings; i++) {
			if (flow->rings[i] == NULL)
				break;
		}
		if (i == flow->nrofrings)
			return -ENOMEM;	/* NOTE(review): see function header */

		/* GFP_ATOMIC: may be called from the TX path. */
		ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
		if (!ring)
			return -ENOMEM;	/* NOTE(review): see function header */

		/* Publish the hash entry, then the ring itself. */
		memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
		hash[hash_idx].fifo = fifo;
		hash[hash_idx].ifidx = ifidx;
		hash[hash_idx].flowid = i;

		ring->hash_id = hash_idx;
		ring->status = RING_CLOSED;	/* opened later by _open() */
		skb_queue_head_init(&ring->skblist);
		flow->rings[i] = ring;

		return i;
	}
	return BRCMF_FLOWRING_INVALID_ID;
}
172 
173 
/* Return the fifo (traffic class) the given flowid was hashed under. */
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
{
	const struct brcmf_flowring_ring *ring = flow->rings[flowid];

	return flow->hash[ring->hash_id].fifo;
}
182 
183 
/* brcmf_flowring_block() - (un)block a ring and update netif flow control.
 * @flow: flowring state.
 * @flowid: ring to change.
 * @blocked: new blocked state for the ring.
 *
 * The netif queue of the owning interface is stopped/woken only when no
 * other open ring on the same interface still holds it blocked.
 */
static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
				 bool blocked)
{
	struct brcmf_flowring_ring *ring;
	struct brcmf_bus *bus_if;
	struct brcmf_pub *drvr;
	struct brcmf_if *ifp;
	bool currently_blocked;
	int i;
	u8 ifidx;
	unsigned long flags;

	spin_lock_irqsave(&flow->block_lock, flags);

	ring = flow->rings[flowid];
	if (ring->blocked == blocked) {
		/* No state change; nothing to record or propagate. */
		spin_unlock_irqrestore(&flow->block_lock, flags);
		return;
	}
	ifidx = brcmf_flowring_ifidx_get(flow, flowid);

	/* Does any OTHER open ring on this interface still block the
	 * netif queue? The target ring is excluded: its (stale) state
	 * must not mask the transition being applied here.
	 */
	currently_blocked = false;
	for (i = 0; i < flow->nrofrings; i++) {
		if ((flow->rings[i]) && (i != flowid)) {
			ring = flow->rings[i];
			if ((ring->status == RING_OPEN) &&
			    (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
				if (ring->blocked) {
					currently_blocked = true;
					break;
				}
			}
		}
	}
	/* BUGFIX: the original wrote "ring->blocked = blocked" here, but
	 * "ring" was reused as the scan cursor above and may point at a
	 * different ring. Index explicitly so the flag lands on the
	 * requested ring.
	 */
	flow->rings[flowid]->blocked = blocked;
	if (currently_blocked) {
		/* Another ring keeps the netif queue stopped either way. */
		spin_unlock_irqrestore(&flow->block_lock, flags);
		return;
	}

	bus_if = dev_get_drvdata(flow->dev);
	drvr = bus_if->drvr;
	ifp = drvr->iflist[ifidx];
	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

	spin_unlock_irqrestore(&flow->block_lock, flags);
}
227 
228 
/* Free a ring: release flow control, invalidate its hash slot and drop
 * every packet still queued on it.
 */
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
{
	struct brcmf_flowring_ring *ring = flow->rings[flowid];
	struct sk_buff *skb;
	u8 hash_idx;

	if (!ring)
		return;

	/* Release any netif-stop state held on behalf of this ring. */
	brcmf_flowring_block(flow, flowid, false);

	/* Invalidate the hash slot and detach the ring before freeing. */
	hash_idx = ring->hash_id;
	flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
	memset(flow->hash[hash_idx].mac, 0, ETH_ALEN);
	flow->rings[flowid] = NULL;

	/* Drop all packets still pending on the ring. */
	while ((skb = skb_dequeue(&ring->skblist)) != NULL)
		brcmu_pkt_buf_free_skb(skb);

	kfree(ring);
}
252 
253 
/* Queue @skb on a ring and apply flow control when the high watermark
 * is crossed.
 */
void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
			    struct sk_buff *skb)
{
	struct brcmf_flowring_ring *ring = flow->rings[flowid];

	skb_queue_tail(&ring->skblist, skb);

	if (ring->blocked ||
	    skb_queue_len(&ring->skblist) <= BRCMF_FLOWRING_HIGH)
		return;

	/* High watermark crossed: stop the netif queue for this ring. */
	brcmf_flowring_block(flow, flowid, true);
	brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
	/* To prevent (work around) possible race condition, check
	 * queue len again. It is also possible to use locking to
	 * protect, but that is undesirable for every enqueue and
	 * dequeue. This simple check will solve a possible race
	 * condition if it occurs.
	 */
	if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
		brcmf_flowring_block(flow, flowid, false);
}
277 
278 
/* Pull the next packet from an open ring, releasing flow control once
 * the queue drains below the low watermark. Returns NULL when the ring
 * is not open or empty.
 */
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
{
	struct brcmf_flowring_ring *ring = flow->rings[flowid];
	struct sk_buff *skb;

	/* Packets may only leave a ring that is fully open. */
	if (ring->status != RING_OPEN)
		return NULL;

	skb = skb_dequeue(&ring->skblist);

	if (ring->blocked &&
	    skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW) {
		/* Drained below the low watermark: lift flow control. */
		brcmf_flowring_block(flow, flowid, false);
		brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
	}

	return skb;
}
298 
299 
/* Put @skb back at the head of the ring so transmit order is preserved
 * (used when a dequeued packet could not be handed to the bus).
 */
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
			     struct sk_buff *skb)
{
	skb_queue_head(&flow->rings[flowid]->skblist, skb);
}
309 
310 
brcmf_flowring_qlen(struct brcmf_flowring * flow,u8 flowid)311 u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
312 {
313 	struct brcmf_flowring_ring *ring;
314 
315 	ring = flow->rings[flowid];
316 	if (!ring)
317 		return 0;
318 
319 	if (ring->status != RING_OPEN)
320 		return 0;
321 
322 	return skb_queue_len(&ring->skblist);
323 }
324 
325 
/* Mark a ring open for traffic (called once the firmware has confirmed
 * ring creation).
 */
void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
{
	struct brcmf_flowring_ring *ring = flow->rings[flowid];

	if (!ring) {
		brcmf_err("Ring NULL, for flowid %d\n", flowid);
		return;
	}

	ring->status = RING_OPEN;
}
338 
339 
/* Return the interface index the given flowid was hashed under. */
u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
{
	u8 hash_idx = flow->rings[flowid]->hash_id;

	return flow->hash[hash_idx].ifidx;
}
350 
351 
/* Allocate and initialize the flowring bookkeeping for @nrofrings rings.
 * Returns NULL on allocation failure.
 */
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
	struct brcmf_flowring *flow;
	u32 i;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->dev = dev;
	flow->nrofrings = nrofrings;
	spin_lock_init(&flow->block_lock);

	/* Default every interface to STA-style (indirect) addressing and
	 * mark every hash slot unused.
	 */
	for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
		flow->addr_mode[i] = ADDR_INDIRECT;
	for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
		flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;

	flow->rings = kcalloc(nrofrings, sizeof(*flow->rings), GFP_KERNEL);
	if (!flow->rings) {
		kfree(flow);
		return NULL;
	}

	return flow;
}
376 
377 
brcmf_flowring_detach(struct brcmf_flowring * flow)378 void brcmf_flowring_detach(struct brcmf_flowring *flow)
379 {
380 	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
381 	struct brcmf_pub *drvr = bus_if->drvr;
382 	struct brcmf_flowring_tdls_entry *search;
383 	struct brcmf_flowring_tdls_entry *remove;
384 	u8 flowid;
385 
386 	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
387 		if (flow->rings[flowid])
388 			brcmf_msgbuf_delete_flowring(drvr, flowid);
389 	}
390 
391 	search = flow->tdls_entry;
392 	while (search) {
393 		remove = search;
394 		search = search->next;
395 		kfree(remove);
396 	}
397 	kfree(flow->rings);
398 	kfree(flow);
399 }
400 
401 
brcmf_flowring_configure_addr_mode(struct brcmf_flowring * flow,int ifidx,enum proto_addr_mode addr_mode)402 void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
403 					enum proto_addr_mode addr_mode)
404 {
405 	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
406 	struct brcmf_pub *drvr = bus_if->drvr;
407 	u32 i;
408 	u8 flowid;
409 
410 	if (flow->addr_mode[ifidx] != addr_mode) {
411 		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
412 			if (flow->hash[i].ifidx == ifidx) {
413 				flowid = flow->hash[i].flowid;
414 				if (flow->rings[flowid]->status != RING_OPEN)
415 					continue;
416 				flow->rings[flowid]->status = RING_CLOSING;
417 				brcmf_msgbuf_delete_flowring(drvr, flowid);
418 			}
419 		}
420 		flow->addr_mode[ifidx] = addr_mode;
421 	}
422 }
423 
424 
/* brcmf_flowring_delete_peer() - close rings of a departing peer and drop
 * it from the TDLS list.
 * @flow: flowring state.
 * @ifidx: interface the peer was associated on.
 * @peer: MAC address of the peer.
 *
 * Closes every open ring matching the peer (or, in plain STA mode, every
 * ring on the interface) and removes the peer's TDLS entry if present.
 */
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
				u8 peer[ETH_ALEN])
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_flowring_hash *hash;
	struct brcmf_flowring_tdls_entry *prev;
	struct brcmf_flowring_tdls_entry *search;
	u32 i;
	u8 flowid;
	bool sta;

	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

	/* Look for the peer in the TDLS list first: a TDLS peer has its
	 * own per-DA rings, so it must be matched by MAC even in STA mode.
	 * "prev" is remembered for the unlink at the bottom.
	 */
	search = flow->tdls_entry;
	prev = NULL;
	while (search) {
		if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
			sta = false;
			break;
		}
		prev = search;
		search = search->next;
	}

	/* Close all matching rings: by MAC for direct/TDLS peers, or every
	 * ring on the interface when in plain STA mode (sta == true).
	 */
	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
		    (hash[i].ifidx == ifidx)) {
			flowid = flow->hash[i].flowid;
			if (flow->rings[flowid]->status == RING_OPEN) {
				flow->rings[flowid]->status = RING_CLOSING;
				brcmf_msgbuf_delete_flowring(drvr, flowid);
			}
		}
	}

	/* Unlink and free the TDLS entry; clear tdls_active once the list
	 * is empty.
	 */
	if (search) {
		if (prev)
			prev->next = search->next;
		else
			flow->tdls_entry = search->next;
		kfree(search);
		if (flow->tdls_entry == NULL)
			flow->tdls_active = false;
	}
}
472 
473 
/* brcmf_flowring_add_tdls_peer() - register a TDLS peer MAC address.
 * @flow: flowring state.
 * @ifidx: interface index (currently unused, kept for API symmetry).
 * @peer: MAC address of the TDLS peer.
 *
 * Appends the peer to the TDLS list (duplicates are ignored) and marks
 * TDLS active so lookup/create key rings per-DA for this peer.
 */
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
				  u8 peer[ETH_ALEN])
{
	struct brcmf_flowring_tdls_entry *tdls_entry;
	struct brcmf_flowring_tdls_entry *search;

	/* GFP_ATOMIC: may be called from non-sleepable context. */
	tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
	if (tdls_entry == NULL)
		return;

	memcpy(tdls_entry->mac, peer, ETH_ALEN);
	tdls_entry->next = NULL;
	if (flow->tdls_entry == NULL) {
		flow->tdls_entry = tdls_entry;
	} else {
		search = flow->tdls_entry;
		if (memcmp(search->mac, peer, ETH_ALEN) == 0)
			goto free_entry;
		while (search->next) {
			search = search->next;
			if (memcmp(search->mac, peer, ETH_ALEN) == 0)
				goto free_entry;
		}
		search->next = tdls_entry;
	}

	flow->tdls_active = true;
	return;

free_entry:
	/* BUGFIX: peer already listed — the original returned here without
	 * freeing the freshly allocated entry, leaking it on every
	 * duplicate registration.
	 */
	kfree(tdls_entry);
}
502