/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "bus.h"
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
#include "pcie.h"
#include "common.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET 0
#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
#define BRCMF_RXREORDER_FLAGS_OFFSET 4
#define BRCMF_RXREORDER_CURIDX_OFFSET 6
#define BRCMF_RXREORDER_EXPIDX_OFFSET 8

#define BRCMF_RXREORDER_DEL_FLOW 0x01
#define BRCMF_RXREORDER_FLUSH_ALL 0x02
#define BRCMF_RXREORDER_CURIDX_VALID 0x04
#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
#define BRCMF_RXREORDER_NEW_HOLE 0x10

#define BRCMF_BSSIDX_INVALID -1

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable legacy p2p management functionality");

char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
        if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
                brcmf_err("ifidx %d out of range\n", ifidx);
                return "<if_bad>";
        }

        if (drvr->iflist[ifidx] == NULL) {
                brcmf_err("null i/f %d\n", ifidx);
                return "<if_null>";
        }

        if (drvr->iflist[ifidx]->ndev)
                return drvr->iflist[ifidx]->ndev->name;

        return "<if_none>";
}

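/* Map a firmware interface index to its brcmf_if instance through the
 * if2bss[] table; returns NULL when the index is out of range or no
 * interface has been created for it yet.
 */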
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
{
        struct brcmf_if *ifp;
        s32 bssidx;

        if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
                brcmf_err("ifidx %d out of range\n", ifidx);
                return NULL;
        }

        ifp = NULL;
        bssidx = drvr->if2bss[ifidx];
        if (bssidx >= 0)
                ifp = drvr->iflist[bssidx];

        return ifp;
}

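/* Deferred work: push the netdev multicast address list, the allmulti
 * flag and the promiscuous flag down to the firmware. Runs from
 * ifp->multicast_work scheduled by brcmf_netdev_set_multicast_list().
 */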
static void _brcmf_set_multicast_list(struct work_struct *work)
{
        struct brcmf_if *ifp;
        struct net_device *ndev;
        struct netdev_hw_addr *ha;
        u32 cmd_value, cnt;
        __le32 cnt_le;
        char *buf, *bufp;
        u32 buflen;
        s32 err;

        ifp = container_of(work, struct brcmf_if, multicast_work);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        ndev = ifp->ndev;

        /* Determine initial value of allmulti flag */
        cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

        /* Send down the multicast list first. */
        cnt = netdev_mc_count(ndev);
        buflen = sizeof(cnt) + (cnt * ETH_ALEN);
        buf = kmalloc(buflen, GFP_ATOMIC);
        if (!buf)
                return;
        bufp = buf;

        cnt_le = cpu_to_le32(cnt);
        memcpy(bufp, &cnt_le, sizeof(cnt_le));
        bufp += sizeof(cnt_le);

        netdev_for_each_mc_addr(ha, ndev) {
                if (!cnt)
                        break;
                memcpy(bufp, ha->addr, ETH_ALEN);
                bufp += ETH_ALEN;
                cnt--;
        }

        err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
        if (err < 0) {
                brcmf_err("Setting mcast_list failed, %d\n", err);
                cmd_value = cnt ? true : cmd_value;
        }

        kfree(buf);

        /*
         * Now send the allmulti setting. This is based on the setting in the
         * net_device flags, but might be modified above to be turned on if we
         * were trying to set some addresses and dongle rejected it...
         */
        err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
        if (err < 0)
                brcmf_err("Setting allmulti failed, %d\n", err);

        /* Finally, pick up the PROMISC flag */
        cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
        err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
        if (err < 0)
                brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
                          err);
}

static void
_brcmf_set_mac_address(struct work_struct *work)
{
        struct brcmf_if *ifp;
        s32 err;

        ifp = container_of(work, struct brcmf_if, setmacaddr_work);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
                                       ETH_ALEN);
        if (err < 0) {
                brcmf_err("Setting cur_etheraddr failed, %d\n", err);
        } else {
                brcmf_dbg(TRACE, "MAC address updated to %pM\n",
                          ifp->mac_addr);
                memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
        }
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct sockaddr *sa = (struct sockaddr *)addr;

        memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
        schedule_work(&ifp->setmacaddr_work);
        return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);

        schedule_work(&ifp->multicast_work);
}

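/* ndo_start_xmit handler: validates bus state and frame length, grows
 * headroom for the protocol header if needed, tracks pending 802.1X
 * (EAPOL) frames and hands the skb to the firmware-signalling layer.
 */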
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
                                           struct net_device *ndev)
{
        int ret;
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
        struct ethhdr *eh;

        brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

        /* Can the device send data? */
        if (drvr->bus_if->state != BRCMF_BUS_UP) {
                brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
                netif_stop_queue(ndev);
                dev_kfree_skb(skb);
                ret = -ENODEV;
                goto done;
        }

        if (!drvr->iflist[ifp->bssidx]) {
                brcmf_err("bad ifidx %d\n", ifp->bssidx);
                netif_stop_queue(ndev);
                dev_kfree_skb(skb);
                ret = -ENODEV;
                goto done;
        }

        /* Make sure there's enough writable headroom */
        ret = skb_cow_head(skb, drvr->hdrlen);
        if (ret < 0) {
                brcmf_err("%s: skb_cow_head failed\n",
                          brcmf_ifname(drvr, ifp->bssidx));
                dev_kfree_skb(skb);
                goto done;
        }

        /* validate length for ether packet */
        if (skb->len < sizeof(*eh)) {
                ret = -EINVAL;
                dev_kfree_skb(skb);
                goto done;
        }

        eh = (struct ethhdr *)(skb->data);

        if (eh->h_proto == htons(ETH_P_PAE))
                atomic_inc(&ifp->pend_8021x_cnt);

        ret = brcmf_fws_process_skb(ifp, skb);

done:
        if (ret) {
                ifp->stats.tx_dropped++;
        } else {
                ifp->stats.tx_packets++;
                ifp->stats.tx_bytes += skb->len;
        }

        /* Return ok: we always eat the packet */
        return NETDEV_TX_OK;
}

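/* Stop or wake the netdev transmit queue for one interface. Multiple
 * stop reasons are tracked as a bitmask in ifp->netif_stop so the queue
 * is only woken again once every reason has been cleared.
 */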
void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state)
{
        unsigned long flags;

        if (!ifp || !ifp->ndev)
                return;

        brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
                  ifp->bssidx, ifp->netif_stop, reason, state);

        spin_lock_irqsave(&ifp->netif_stop_lock, flags);
        if (state) {
                if (!ifp->netif_stop)
                        netif_stop_queue(ifp->ndev);
                ifp->netif_stop |= reason;
        } else {
                ifp->netif_stop &= ~reason;
                if (!ifp->netif_stop)
                        netif_wake_queue(ifp->ndev);
        }
        spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        brcmf_dbg(TRACE, "Enter\n");

        brcmf_fws_bus_blocked(drvr, state);
}

void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
        if (skb->pkt_type == PACKET_MULTICAST)
                ifp->stats.multicast++;

        if (!(ifp->ndev->flags & IFF_UP)) {
                brcmu_pkt_buf_free_skb(skb);
                return;
        }

        ifp->stats.rx_bytes += skb->len;
        ifp->stats.rx_packets++;

        brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
        if (in_interrupt())
                netif_rx(skb);
        else
                /* If the receive is not processed inside an ISR,
                 * the softirqd must be woken explicitly to service
                 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
                 */
                netif_rx_ni(skb);
}

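/* Collect the buffered packets between slot 'start' and slot 'end'
 * (exclusive, wrapping at max_idx) of a reorder flow into skb_list and
 * update the flow's pending-packet count.
 */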
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
                                         u8 start, u8 end,
                                         struct sk_buff_head *skb_list)
{
        /* initialize return list */
        __skb_queue_head_init(skb_list);

        if (rfi->pend_pkts == 0) {
                brcmf_dbg(INFO, "no packets in reorder queue\n");
                return;
        }

        do {
                if (rfi->pktslots[start]) {
                        __skb_queue_tail(skb_list, rfi->pktslots[start]);
                        rfi->pktslots[start] = NULL;
                }
                start++;
                if (start > rfi->max_idx)
                        start = 0;
        } while (start != end);
        rfi->pend_pkts -= skb_queue_len(skb_list);
}

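/* Handle the AMPDU rx reorder metadata the firmware attaches to a
 * received packet: create or delete the per-flow reorder state, buffer
 * out-of-order frames in pktslots[], and flush in-order frames up the
 * stack through brcmf_netif_rx().
 */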
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
                                         struct sk_buff *pkt)
{
        u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
        struct brcmf_ampdu_rx_reorder *rfi;
        struct sk_buff_head reorder_list;
        struct sk_buff *pnext;
        u8 flags;
        u32 buf_size;

        flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
        flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

        /* validate flags and flow id */
        if (flags == 0xFF) {
                brcmf_err("invalid flags...so ignore this packet\n");
                brcmf_netif_rx(ifp, pkt);
                return;
        }

        rfi = ifp->drvr->reorder_flows[flow_id];
        if (flags & BRCMF_RXREORDER_DEL_FLOW) {
                brcmf_dbg(INFO, "flow-%d: delete\n",
                          flow_id);

                if (rfi == NULL) {
                        brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
                                  flow_id);
                        brcmf_netif_rx(ifp, pkt);
                        return;
                }

                brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
                                             &reorder_list);
                /* add the last packet */
                __skb_queue_tail(&reorder_list, pkt);
                kfree(rfi);
                ifp->drvr->reorder_flows[flow_id] = NULL;
                goto netif_rx;
        }
        /* from here on we need a flow reorder instance */
        if (rfi == NULL) {
                buf_size = sizeof(*rfi);
                max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

                buf_size += (max_idx + 1) * sizeof(pkt);

                /* allocate space for flow reorder info */
                brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
                          flow_id, max_idx);
                rfi = kzalloc(buf_size, GFP_ATOMIC);
                if (rfi == NULL) {
                        brcmf_err("failed to alloc buffer\n");
                        brcmf_netif_rx(ifp, pkt);
                        return;
                }

                ifp->drvr->reorder_flows[flow_id] = rfi;
                rfi->pktslots = (struct sk_buff **)(rfi + 1);
                rfi->max_idx = max_idx;
        }
        if (flags & BRCMF_RXREORDER_NEW_HOLE) {
                if (rfi->pend_pkts) {
                        brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
                                                     rfi->exp_idx,
                                                     &reorder_list);
                        WARN_ON(rfi->pend_pkts);
                } else {
                        __skb_queue_head_init(&reorder_list);
                }
                rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
                rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
                rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
                rfi->pktslots[rfi->cur_idx] = pkt;
                rfi->pend_pkts++;
                brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
                          flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
        } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
                cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
                exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

                if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
                        /* still in the current hole */
                        /* enqueue the current on the buffer chain */
                        if (rfi->pktslots[cur_idx] != NULL) {
                                brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
                                brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
                                rfi->pktslots[cur_idx] = NULL;
                        }
                        rfi->pktslots[cur_idx] = pkt;
                        rfi->pend_pkts++;
                        rfi->cur_idx = cur_idx;
                        brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
                                  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

                        /* can return now as there is no reorder
                         * list to process.
                         */
                        return;
                }
                if (rfi->exp_idx == cur_idx) {
                        if (rfi->pktslots[cur_idx] != NULL) {
                                brcmf_dbg(INFO, "error buffer pending..free it\n");
                                brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
                                rfi->pktslots[cur_idx] = NULL;
                        }
                        rfi->pktslots[cur_idx] = pkt;
                        rfi->pend_pkts++;

                        /* got the expected one. flush from current to expected
                         * and update expected
                         */
                        brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
                                  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

                        rfi->cur_idx = cur_idx;
                        rfi->exp_idx = exp_idx;

                        brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
                                                     &reorder_list);
                        brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
                                  flow_id, skb_queue_len(&reorder_list),
                                  rfi->pend_pkts);
                } else {
                        u8 end_idx;

                        brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
                                  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
                                  cur_idx, exp_idx);
                        if (flags & BRCMF_RXREORDER_FLUSH_ALL)
                                end_idx = rfi->exp_idx;
                        else
                                end_idx = exp_idx;

                        /* flush pkts first */
                        brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
                                                     &reorder_list);

                        if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
                                __skb_queue_tail(&reorder_list, pkt);
                        } else {
                                rfi->pktslots[cur_idx] = pkt;
                                rfi->pend_pkts++;
                        }
                        rfi->exp_idx = exp_idx;
                        rfi->cur_idx = cur_idx;
                }
        } else {
                /* explicit window move updating the expected index */
                exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

                brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
                          flow_id, flags, rfi->exp_idx, exp_idx);
                if (flags & BRCMF_RXREORDER_FLUSH_ALL)
                        end_idx = rfi->exp_idx;
                else
                        end_idx = exp_idx;

                brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
                                             &reorder_list);
                __skb_queue_tail(&reorder_list, pkt);
                /* set the new expected idx */
                rfi->exp_idx = exp_idx;
        }
netif_rx:
        skb_queue_walk_safe(&reorder_list, pkt, pnext) {
                __skb_unlink(pkt, &reorder_list);
                brcmf_netif_rx(ifp, pkt);
        }
}

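/* Receive path entry point for data frames from the bus layer: strip the
 * protocol header, resolve the receiving interface, run AMPDU reordering
 * when the firmware requested it, and optionally process embedded
 * firmware events before passing the frame to the netif layer.
 */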
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
{
        struct brcmf_if *ifp;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_skb_reorder_data *rd;
        int ret;

        brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

        /* process and remove protocol-specific header */
        ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);

        if (ret || !ifp || !ifp->ndev) {
                if (ret != -ENODATA && ifp)
                        ifp->stats.rx_errors++;
                brcmu_pkt_buf_free_skb(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, ifp->ndev);

        rd = (struct brcmf_skb_reorder_data *)skb->cb;
        if (rd->reorder) {
                brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
        } else {
                /* Process special event packets */
                if (handle_event)
                        brcmf_fweh_process_skb(ifp->drvr, skb,
                                               BCMILCP_SUBTYPE_VENDOR_LONG);

                brcmf_netif_rx(ifp, skb);
        }
}

void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
{
        struct brcmf_if *ifp;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        int ret;

        brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

        /* process and remove protocol-specific header */
        ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);

        if (ret || !ifp || !ifp->ndev) {
                if (ret != -ENODATA && ifp)
                        ifp->stats.rx_errors++;
                brcmu_pkt_buf_free_skb(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, ifp->ndev);

        brcmf_fweh_process_skb(ifp->drvr, skb, 0);
        brcmu_pkt_buf_free_skb(skb);
}

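/* Finish transmission of a frame: account for completed 802.1X frames
 * (waking brcmf_netdev_wait_pend8021x() if it is waiting), update error
 * statistics and free the packet.
 */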
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
{
        struct ethhdr *eh;
        u16 type;

        eh = (struct ethhdr *)(txp->data);
        type = ntohs(eh->h_proto);

        if (type == ETH_P_PAE) {
                atomic_dec(&ifp->pend_8021x_cnt);
                if (waitqueue_active(&ifp->pend_8021x_wait))
                        wake_up(&ifp->pend_8021x_wait);
        }

        if (!success)
                ifp->stats.tx_errors++;

        brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_if *ifp;

        /* await txstatus signal for firmware if active */
        if (brcmf_fws_fc_active(drvr->fws)) {
                if (!success)
                        brcmf_fws_bustxfail(drvr->fws, txp);
        } else {
                if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
                        brcmu_pkt_buf_free_skb(txp);
                else
                        brcmf_txfinalize(ifp, txp, success);
        }
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
                                      struct ethtool_drvinfo *info)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
        char drev[BRCMU_DOTREV_LEN] = "n/a";

        if (drvr->revinfo.result == 0)
                brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, drev, sizeof(info->version));
        strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
        strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
                sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
        .get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        brcmf_cfg80211_down(ndev);

        brcmf_net_setcarrier(ifp, false);

        return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
        struct brcmf_bus *bus_if = drvr->bus_if;
        u32 toe_ol;

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        /* If bus is not ready, can't continue */
        if (bus_if->state != BRCMF_BUS_UP) {
                brcmf_err("failed bus is not ready\n");
                return -EAGAIN;
        }

        atomic_set(&ifp->pend_8021x_cnt, 0);

        /* Get current TOE mode from dongle */
        if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
            && (toe_ol & TOE_TX_CSUM_OL) != 0)
                ndev->features |= NETIF_F_IP_CSUM;
        else
                ndev->features &= ~NETIF_F_IP_CSUM;

        if (brcmf_cfg80211_up(ndev)) {
                brcmf_err("failed to bring up cfg80211\n");
                return -EIO;
        }

        /* Clear carrier; it is set once connected or in AP mode. */
        netif_carrier_off(ndev);
        return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
        .ndo_open = brcmf_netdev_open,
        .ndo_stop = brcmf_netdev_stop,
        .ndo_get_stats = brcmf_netdev_get_stats,
        .ndo_start_xmit = brcmf_netdev_start_xmit,
        .ndo_set_mac_address = brcmf_netdev_set_mac_address,
        .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

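/* Attach the net_device of a non-P2P-device interface: hook up the netdev
 * and ethtool operations, reserve headroom for the bus/protocol header,
 * set the MAC address and register the device with the network stack.
 * On failure the partially initialised netdev is freed.
 */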
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
        struct brcmf_pub *drvr = ifp->drvr;
        struct net_device *ndev;
        s32 err;

        brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
                  ifp->mac_addr);
        ndev = ifp->ndev;

        /* set appropriate operations */
        ndev->netdev_ops = &brcmf_netdev_ops_pri;

        ndev->hard_header_len += drvr->hdrlen;
        ndev->ethtool_ops = &brcmf_ethtool_ops;

        drvr->rxsz = ndev->mtu + ndev->hard_header_len +
                     drvr->hdrlen;

        /* set the mac address */
        memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

        INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
        INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

        if (rtnl_locked)
                err = register_netdevice(ndev);
        else
                err = register_netdev(ndev);
        if (err != 0) {
                brcmf_err("couldn't register the net device\n");
                goto fail;
        }

        brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
        return 0;

fail:
        drvr->iflist[ifp->bssidx] = NULL;
        ndev->netdev_ops = NULL;
        free_netdev(ndev);
        return -EBADE;
}

static void brcmf_net_detach(struct net_device *ndev)
{
        if (ndev->reg_state == NETREG_REGISTERED)
                unregister_netdev(ndev);
        else
                brcmf_cfg80211_free_netdev(ndev);
}

void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
{
        struct net_device *ndev;

        brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);

        ndev = ifp->ndev;
        brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
        if (on) {
                if (!netif_carrier_ok(ndev))
                        netif_carrier_on(ndev);

        } else {
                if (netif_carrier_ok(ndev))
                        netif_carrier_off(ndev);
        }
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
        brcmf_dbg(TRACE, "Enter\n");

        return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
        brcmf_dbg(TRACE, "Enter\n");

        return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
                                            struct net_device *ndev)
{
        if (skb)
                dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
        .ndo_open = brcmf_net_p2p_open,
        .ndo_stop = brcmf_net_p2p_stop,
        .ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
        struct net_device *ndev;

        brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
                  ifp->mac_addr);
        ndev = ifp->ndev;

        ndev->netdev_ops = &brcmf_netdev_ops_p2p;

        /* set the mac address */
        memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

        if (register_netdev(ndev) != 0) {
                brcmf_err("couldn't register the p2p net device\n");
                goto fail;
        }

        brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

        return 0;

fail:
        ifp->drvr->iflist[ifp->bssidx] = NULL;
        ndev->netdev_ops = NULL;
        free_netdev(ndev);
        return -EBADE;
}

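/* Create the driver-side interface object for a firmware bsscfg/interface
 * pair. Allocates either a bare brcmf_if (for the non-netdev P2P device)
 * or a full net_device, records the ifidx-to-bssidx mapping and replaces
 * a stale interface left over from a missed BRCMF_E_IF_DEL event.
 */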
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
                              bool is_p2pdev, char *name, u8 *mac_addr)
{
        struct brcmf_if *ifp;
        struct net_device *ndev;

        brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

        ifp = drvr->iflist[bssidx];
        /*
         * Delete the existing interface before overwriting it
         * in case we missed the BRCMF_E_IF_DEL event.
         */
        if (ifp) {
                brcmf_err("ERROR: netdev:%s already exists\n",
                          ifp->ndev->name);
                if (ifidx) {
                        netif_stop_queue(ifp->ndev);
                        brcmf_net_detach(ifp->ndev);
                        drvr->iflist[bssidx] = NULL;
                } else {
                        brcmf_err("ignore IF event\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (!brcmf_p2p_enable && is_p2pdev) {
                /* this is P2P_DEVICE interface */
                brcmf_dbg(INFO, "allocate non-netdev interface\n");
                ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
                if (!ifp)
                        return ERR_PTR(-ENOMEM);
        } else {
                brcmf_dbg(INFO, "allocate netdev interface\n");
                /* Allocate netdev, including space for private structure */
                ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
                                    NET_NAME_UNKNOWN, ether_setup);
                if (!ndev)
                        return ERR_PTR(-ENOMEM);

                ndev->destructor = brcmf_cfg80211_free_netdev;
                ifp = netdev_priv(ndev);
                ifp->ndev = ndev;
                /* store mapping ifidx to bssidx */
                if (drvr->if2bss[ifidx] == BRCMF_BSSIDX_INVALID)
                        drvr->if2bss[ifidx] = bssidx;
        }

        ifp->drvr = drvr;
        drvr->iflist[bssidx] = ifp;
        ifp->ifidx = ifidx;
        ifp->bssidx = bssidx;

        init_waitqueue_head(&ifp->pend_8021x_wait);
        spin_lock_init(&ifp->netif_stop_lock);

        if (mac_addr != NULL)
                memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

        brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
                  current->pid, name, ifp->mac_addr);

        return ifp;
}

static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
        struct brcmf_if *ifp;

        ifp = drvr->iflist[bssidx];
        drvr->iflist[bssidx] = NULL;
        if (!ifp) {
                brcmf_err("Null interface, idx=%d\n", bssidx);
                return;
        }
        brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
        if (drvr->if2bss[ifp->ifidx] == bssidx)
                drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
        if (ifp->ndev) {
                if (bssidx == 0) {
                        if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
                                rtnl_lock();
                                brcmf_netdev_stop(ifp->ndev);
                                rtnl_unlock();
                        }
                } else {
                        netif_stop_queue(ifp->ndev);
                }

                if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
                        cancel_work_sync(&ifp->setmacaddr_work);
                        cancel_work_sync(&ifp->multicast_work);
                }
                brcmf_net_detach(ifp->ndev);
        } else {
                /* Only p2p device interfaces which get dynamically created
                 * end up here. In this case the p2p module should be informed
                 * about the removal of the interface within the firmware. If
                 * not then p2p commands towards the firmware will cause some
                 * serious troublesome side effects. The p2p module will clean
                 * up the ifp if needed.
                 */
                brcmf_p2p_ifp_removed(ifp);
                kfree(ifp);
        }
}

void brcmf_remove_interface(struct brcmf_if *ifp)
{
        if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bssidx] != ifp))
                return;
        brcmf_dbg(TRACE, "Enter, bssidx=%d, ifidx=%d\n", ifp->bssidx,
                  ifp->ifidx);
        brcmf_fws_del_interface(ifp);
        brcmf_del_if(ifp->drvr, ifp->bssidx);
}

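/* Scan the interface list for a free bsscfg index for a new virtual
 * interface, starting the search at index 2; returns -ENOMEM when no
 * free slot is available.
 */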
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
{
        int ifidx;
        int bsscfgidx;
        bool available;
        int highest;

        available = false;
        bsscfgidx = 2;
        highest = 2;
        for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
                if (drvr->iflist[ifidx]) {
                        if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
                                bsscfgidx = highest + 1;
                        else if (drvr->iflist[ifidx]->bssidx > highest)
                                highest = drvr->iflist[ifidx]->bssidx;
                } else {
                        available = true;
                }
        }

        return available ? bsscfgidx : -ENOMEM;
}

int brcmf_attach(struct device *dev)
{
        struct brcmf_pub *drvr = NULL;
        int ret = 0;
        int i;

        brcmf_dbg(TRACE, "Enter\n");

        /* Allocate primary brcmf_info */
        drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
        if (!drvr)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++)
                drvr->if2bss[i] = BRCMF_BSSIDX_INVALID;

        mutex_init(&drvr->proto_block);

        /* Link to bus module */
        drvr->hdrlen = 0;
        drvr->bus_if = dev_get_drvdata(dev);
        drvr->bus_if->drvr = drvr;

        /* attach debug facilities */
        brcmf_debug_attach(drvr);

        /* Attach and link in the protocol */
        ret = brcmf_proto_attach(drvr);
        if (ret != 0) {
                brcmf_err("brcmf_prot_attach failed\n");
                goto fail;
        }

        /* attach firmware event handler */
        brcmf_fweh_attach(drvr);

        return ret;

fail:
        brcmf_detach(dev);

        return ret;
}

static int brcmf_revinfo_read(struct seq_file *s, void *data)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
        struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
        char drev[BRCMU_DOTREV_LEN];
        char brev[BRCMU_BOARDREV_LEN];

        seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
        seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
        seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
        seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
        seq_printf(s, "chiprev: %u\n", ri->chiprev);
        seq_printf(s, "chippkg: %u\n", ri->chippkg);
        seq_printf(s, "corerev: %u\n", ri->corerev);
        seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
        seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
        seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
        seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
        seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
        seq_printf(s, "bus: %u\n", ri->bus);
        seq_printf(s, "phytype: %u\n", ri->phytype);
        seq_printf(s, "phyrev: %u\n", ri->phyrev);
        seq_printf(s, "anarev: %u\n", ri->anarev);
        seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);

        return 0;
}

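/* Called by the bus layer once the device is up: create the primary
 * "wlan%d" interface, run the pre-initialisation dongle commands, attach
 * feature, firmware-signalling and cfg80211 support, and register the
 * network device(s). Everything is torn down again on failure.
 */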
int brcmf_bus_start(struct device *dev)
{
        int ret = -1;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_if *ifp;
        struct brcmf_if *p2p_ifp;

        brcmf_dbg(TRACE, "\n");

        /* add primary networking interface */
        ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
        if (IS_ERR(ifp))
                return PTR_ERR(ifp);

        p2p_ifp = NULL;

        /* signal bus ready */
        brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);

        /* Bus is ready, do any initialization */
        ret = brcmf_c_preinit_dcmds(ifp);
        if (ret < 0)
                goto fail;

        brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);

        /* assure we have chipid before feature attach */
        if (!bus_if->chip) {
                bus_if->chip = drvr->revinfo.chipnum;
                bus_if->chiprev = drvr->revinfo.chiprev;
                brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
                          bus_if->chip, bus_if->chip, bus_if->chiprev);
        }
        brcmf_feat_attach(drvr);

        ret = brcmf_fws_init(drvr);
        if (ret < 0)
                goto fail;

        brcmf_fws_add_interface(ifp);

        drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
                                             brcmf_p2p_enable);
        if (drvr->config == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = brcmf_net_attach(ifp, false);

        if ((!ret) && (brcmf_p2p_enable)) {
                p2p_ifp = drvr->iflist[1];
                if (p2p_ifp)
                        ret = brcmf_net_p2p_attach(p2p_ifp);
        }
fail:
        if (ret < 0) {
                brcmf_err("failed: %d\n", ret);
                if (drvr->config) {
                        brcmf_cfg80211_detach(drvr->config);
                        drvr->config = NULL;
                }
                if (drvr->fws) {
                        brcmf_fws_del_interface(ifp);
                        brcmf_fws_deinit(drvr);
                }
                if (ifp)
                        brcmf_net_detach(ifp->ndev);
                if (p2p_ifp)
                        brcmf_net_detach(p2p_ifp->ndev);
                return ret;
        }
        return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        if (drvr) {
                drvr->hdrlen += len;
        }
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
        brcmf_dbg(TRACE, "Enter\n");

        if (drvr) {
                /* Stop the bus module */
                brcmf_bus_stop(drvr->bus_if);
        }
}

void brcmf_dev_reset(struct device *dev)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        if (drvr == NULL)
                return;

        if (drvr->iflist[0])
                brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}

void brcmf_detach(struct device *dev)
{
        s32 i;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        brcmf_dbg(TRACE, "Enter\n");

        if (drvr == NULL)
                return;

        /* stop firmware event handling */
        brcmf_fweh_detach(drvr);
        if (drvr->config)
                brcmf_p2p_detach(&drvr->config->p2p);

        brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

        /* make sure primary interface removed last */
        for (i = BRCMF_MAX_IFS - 1; i > -1; i--)
                brcmf_remove_interface(drvr->iflist[i]);

        brcmf_cfg80211_detach(drvr->config);

        brcmf_fws_deinit(drvr);

        brcmf_bus_detach(drvr);

        brcmf_proto_detach(drvr);

        brcmf_debug_detach(drvr);
        bus_if->drvr = NULL;
        kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_if *ifp = bus_if->drvr->iflist[0];

        return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
        return atomic_read(&ifp->pend_8021x_cnt);
}

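/* Wait up to MAX_WAIT_FOR_8021X_TX milliseconds for all outstanding
 * 802.1X frames to be transmitted; returns 0 on success and non-zero
 * (with a WARN) if the wait times out.
 */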
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
        int err;

        err = wait_event_timeout(ifp->pend_8021x_wait,
                                 !brcmf_get_pend_8021x_cnt(ifp),
                                 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

        WARN_ON(!err);

        return !err;
}

void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
{
        struct brcmf_pub *drvr = bus->drvr;
        struct net_device *ndev;
        int ifidx;

        brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
        bus->state = state;

        if (state == BRCMF_BUS_UP) {
                for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
                        if ((drvr->iflist[ifidx]) &&
                            (drvr->iflist[ifidx]->ndev)) {
                                ndev = drvr->iflist[ifidx]->ndev;
                                if (netif_queue_stopped(ndev))
                                        netif_wake_queue(ndev);
                        }
                }
        }
}

static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
        brcmf_usb_register();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
        brcmf_pcie_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
        brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_init();
#endif
        if (!schedule_work(&brcmf_driver_work))
                return -EBUSY;

        return 0;
}

static void __exit brcmfmac_module_exit(void)
{
        cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
        brcmf_usb_exit();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
        brcmf_pcie_exit();
#endif
        brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);