• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3  * Basically selected code segments from usb-cdc.c and usb-rndis.c
4  *
5  * Copyright (C) 1999-2013, Broadcom Corporation
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  * $Id: dhd_linux.c 419821 2013-08-22 21:43:26Z $
26  */
27 
28 #include <typedefs.h>
29 #include <linuxver.h>
30 #include <osl.h>
31 
32 #include <linux/init.h>
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/rtnetlink.h>
39 #include <linux/etherdevice.h>
40 #include <linux/random.h>
41 #include <linux/spinlock.h>
42 #include <linux/ethtool.h>
43 #include <linux/fcntl.h>
44 #include <linux/fs.h>
45 #include <linux/ip.h>
46 #include <net/addrconf.h>
47 
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
50 
51 #include <epivers.h>
52 #include <bcmutils.h>
53 #include <bcmendian.h>
54 #include <bcmdevs.h>
55 
56 #include <proto/ethernet.h>
57 #include <proto/bcmip.h>
58 #include <dngl_stats.h>
59 #include <dhd.h>
60 #include <dhd_bus.h>
61 #include <dhd_proto.h>
62 #include <dhd_dbg.h>
63 #ifdef CONFIG_HAS_WAKELOCK
64 #include <linux/wakelock.h>
65 #endif
66 #ifdef WL_CFG80211
67 #include <wl_cfg80211.h>
68 #endif
69 #ifdef PNO_SUPPORT
70 #include <dhd_pno.h>
71 #endif
72 
73 #ifdef WLMEDIA_HTSF
74 #include <linux/time.h>
75 #include <htsf.h>
76 
77 #define HTSF_MINLEN 200    /* min. packet length to timestamp */
78 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us  */
79 #define TSMAX  1000        /* max no. of timing record kept   */
80 #define NUMBIN 34
81 
/* HTSF timestamping state (module-global, WLMEDIA_HTSF builds only) */
static uint32 tsidx = 0;		/* NOTE(review): appears to index the ts[] record ring - confirm */
static uint32 htsf_seqnum = 0;		/* sequence number stamped into outgoing packets */
uint32 tsfsync;
struct timeval tsync;
static uint32 tsport = 5010;		/* UDP port of the traffic selected for timestamping */

/* Latency histogram: NUMBIN buckets of sample counts */
typedef struct histo_ {
	uint32 bin[NUMBIN];
} histo_t;

#if !ISPOWEROF2(DHD_SDALIGN)
#error DHD_SDALIGN is not a power of 2!
#endif

/* Per-stage delay histograms (filled by dhd_dump_htsfhisto users) */
static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
97 #endif /* WLMEDIA_HTSF */
98 
99 
100 #if defined(SOFTAP)
101 extern bool ap_cfg_running;
102 extern bool ap_fw_loaded;
103 #endif
104 
105 /* enable HOSTIP cache update from the host side when an eth0:N is up */
106 #define AOE_IP_ALIAS_SUPPORT 1
107 
108 #ifdef BCM_FD_AGGR
109 #include <bcm_rpc.h>
110 #include <bcm_rpc_tp.h>
111 #endif
112 #ifdef PROP_TXSTATUS
113 #include <wlfc_proto.h>
114 #include <dhd_wlfc.h>
115 #endif
116 
117 #include <wl_android.h>
118 
#ifdef ARP_OFFLOAD_SUPPORT
/* Push one host IPv4 address into (add) or out of (remove) the dongle's
 * ARP-offload host-IP table for interface 'idx'.
 */
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_device_event(struct notifier_block *this,
	unsigned long event,
	void *ptr);

/* Notifier callback for IPv4 address changes; presumably registered on the
 * inetaddr chain elsewhere in this file - see dhd_device_event.
 */
static struct notifier_block dhd_notifier = {
	.notifier_call = dhd_device_event
};
#endif /* ARP_OFFLOAD_SUPPORT */
static int dhd_device_ipv6_event(struct notifier_block *this,
	unsigned long event,
	void *ptr);

/* Notifier callback for IPv6 address changes (inet6addr chain counterpart) */
static struct notifier_block dhd_notifier_ipv6 = {
	.notifier_call = dhd_device_ipv6_event
};
136 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
/* Set while the kernel is suspending/hibernating (see dhd_sleep_pm_callback);
 * read by the SDIO/bus code to avoid touching hardware during suspend.
 */
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(struct work_struct *work);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL v2");
#endif /* LinuxVer */

/* NOTE(review): <dhd_bus.h> is already included near the top of the file;
 * this second include is harmless (header guards) but redundant.
 */
#include <dhd_bus.h>

/* Receive buffer size handed to the bus layer for one frame */
#ifdef BCM_FD_AGGR
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
/* extra 128 bytes of headroom when proptxstatus is compiled in -
 * presumably for wlfc signalling data; confirm against dhd_wlfc.h
 */
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
#endif /* BCM_FD_AGGR */

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void);
extern void dhd_wlfc_plat_enable(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
170 
171 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * 2.6.15 kernels do not export print_tainted(), which some of the debug
 * macros reference; provide a benign stub that reports no taint.
 */
const char *
print_tainted()
{
	static const char no_taint[] = "";

	return no_taint;
}
177 #endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
178 
179 /* Linux wireless extension support */
180 #if defined(WL_WIRELESS_EXT)
181 #include <wl_iw.h>
182 extern wl_iw_extra_params_t  g_wl_iw_params;
183 #endif /* defined(WL_WIRELESS_EXT) */
184 
185 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
186 #include <linux/earlysuspend.h>
187 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
188 
189 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
190 
191 #ifdef PKT_FILTER_SUPPORT
192 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
193 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
194 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
195 #endif
196 
197 
#ifdef READ_MACADDR
extern int dhd_read_macaddr(struct dhd_info *dhd);
#else
/* No platform MAC-read support compiled in: succeed without doing anything */
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
#endif
#ifdef WRITE_MACADDR
extern int dhd_write_macaddr(struct ether_addr *mac);
#else
/* No platform MAC-write support compiled in: succeed without doing anything */
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif
/* One pending IPv6 host-address add/delete operation for an interface;
 * presumably queued on dhd_if.ipv6_list under ipv6_lock - see the
 * ipv6_list/ipv6_lock members below.
 */
struct ipv6_addr {
	char 			ipv6_addr[IPV6_ADDR_LEN];
	dhd_ipv6_op_t 	ipv6_oper;	/* operation type (add/delete) */
	struct list_head list;		/* linkage on the per-interface list */
};

/* Interface control information */
typedef struct dhd_if {
	struct dhd_info *info;			/* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net;
	struct net_device_stats stats;
	int 			idx;			/* iface idx in dongle */
	dhd_if_state_t	state;			/* interface state */
	uint 			subunit;		/* subunit */
	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
	bool			attached;		/* Delayed attachment when unset */
	bool			txflowcontrol;	/* Per interface flow control indicator */
	char			name[IFNAMSIZ+1]; /* linux interface name */
	uint8			bssidx;			/* bsscfg index for the interface */
	bool			set_multicast;	/* multicast list refresh requested */
	struct list_head ipv6_list;		/* pending struct ipv6_addr operations */
	spinlock_t		ipv6_lock;		/* protects ipv6_list */
	bool			event2cfg80211;	/* To determine if pass event to cfg80211 */
} dhd_if_t;
233 
#ifdef WLMEDIA_HTSF
/* 64-bit TSF value split into two 32-bit halves */
typedef struct {
	uint32 low;
	uint32 high;
} tsf_t;

/* State used to correlate host time with the dongle TSF clock */
typedef struct {
	uint32 last_cycle;
	uint32 last_sec;
	uint32 last_tsf;
	uint32 coef;     /* scaling factor */
	uint32 coefdec1; /* first decimal  */
	uint32 coefdec2; /* second decimal */
} htsf_t;

/* Four timestamps recorded along a packet's path */
typedef struct {
	uint32 t1;
	uint32 t2;
	uint32 t3;
	uint32 t4;
} tstamp_t;

static tstamp_t ts[TSMAX];	/* ring of recorded per-packet timestamps */
static tstamp_t maxdelayts;	/* timestamps of the worst-case packet seen */
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;

#endif  /* WLMEDIA_HTSF */
261 
/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */

	/* OS-independent public state; embedded here, so it is NOT the first
	 * member when WL_WIRELESS_EXT is defined - never cast a dhd_pub_t*
	 * back to dhd_info_t* directly, use pub.info instead.
	 */
	dhd_pub_t pub;

	/* For supporting multiple interfaces */
	dhd_if_t *iflist[DHD_MAX_IFS];

	struct semaphore proto_sem;	/* serializes protocol-layer transactions */
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;
#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
	htsf_t  htsf;
#endif
	wait_queue_head_t ioctl_resp_wait;	/* woken when an ioctl response arrives */
	uint32	default_wd_interval;		/* watchdog period to restore (ms) */

	struct timer_list timer;	/* watchdog timer */
	bool wd_timer_valid;		/* TRUE while 'timer' is armed */
	struct tasklet_struct tasklet;	/* DPC tasklet (non-threaded operation) */
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	dhd_lock;
#ifdef DHDTHREAD
	/* Thread based operation */
	bool threads_only;
	struct semaphore sdsem;

	tsk_ctl_t	thr_dpc_ctl;	/* DPC thread control */
	tsk_ctl_t	thr_wdt_ctl;	/* watchdog thread control */
#ifdef RXFRAME_THREAD
	tsk_ctl_t	thr_rxf_ctl;	/* rx frame thread control */
	spinlock_t	rxf_lock;	/* protects the rx frame ring (pub.skbbuf) */
#endif /* RXFRAME_THREAD */
#endif /* DHDTHREAD */
	bool dhd_tasklet_create;
	tsk_ctl_t	thr_sysioc_ctl;	/* system-ioctl (set mac/mcast) thread control */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct work_struct work_hang;	/* deferred HANG event processing */
#endif

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct wake_lock wl_wifi;   /* Wifi wakelock */
	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#endif
	spinlock_t wakelock_spinlock;	/* protects the wakelock counters below */
	int wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;

	/* Thread to issue ioctl for multicast */
	unsigned char set_macaddress;
	struct ether_addr macvalue;	/* MAC address pending for the sysioc thread */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;	/* outstanding 802.1X frames in flight */
	dhd_attach_states_t dhd_state;	/* bitmask of completed attach stages */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;	/* presumably an IPv4 address recorded before the
				 * bus was up - TODO confirm against the notifier code */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
	void *rpc_th;
	void *rpc_osh;
	struct timer_list rpcth_timer;
	bool rpcth_timer_active;
	bool fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
} dhd_info_t;
353 
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware/chip version info visible from the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;		/* requested firmware operating mode */
int disable_proptx = 0;		/* non-zero disables proptxstatus flow control */
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
struct semaphore dhd_registration_sem;	/* signalled when netdev registration completes */
struct semaphore dhd_chipup_sem;	/* signalled when the chip is detected */
int dhd_registration_check = FALSE;

#define DHD_REGISTRATION_TIMEOUT  12000  /* msec : allowed time to finished dhd registration */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

/* Spawn a thread for system ioctls (set mac, set mcast) */
uint dhd_sysioc = TRUE;
module_param(dhd_sysioc, uint, 0);

/* Error bits */
module_param(dhd_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */



/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);

/* Watchdog interval */

/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = 10;
module_param(dhd_watchdog_ms, uint, 0);

#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#endif /* defined(DHD_DEBUG) */

/* Enable dongle sleep auto (KSO) power save */
uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
uint dhd_master_mode = TRUE;
module_param(dhd_master_mode, uint, 0);

#ifdef DHDTHREAD
/* Watchdog thread priority */
int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

#ifdef RXFRAME_THREAD
/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);
#endif /* RXFRAME_THREAD */

/* DPC thread priority, -1 to use tasklet */
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* DHDTHREAD */
/* Control fw roaming */
uint dhd_roam_disable = 0;

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#ifdef BCMDBGFS
extern void dhd_dbg_init(dhd_pub_t *dhdp);
extern void dhd_dbg_remove(void);
#endif /* BCMDBGFS */



#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */

/* Version string to report */
#ifdef DHD_DEBUG
#ifndef SRCBASE
#define SRCBASE        "drivers/net/wireless/bcmdhd"
#endif
#define DHD_COMPILED "\nCompiled in " SRCBASE
#else
#define DHD_COMPILED
#endif /* DHD_DEBUG */

static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
#ifdef DHD_DEBUG
"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
#endif
;
527 static void dhd_net_if_lock_local(dhd_info_t *dhd);
528 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
529 static void dhd_suspend_lock(dhd_pub_t *dhdp);
530 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
531 
532 #ifdef WLMEDIA_HTSF
533 void htsf_update(dhd_info_t *dhd, void *data);
534 tsf_t prev_tsf, cur_tsf;
535 
536 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
537 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
538 static void dhd_dump_latency(void);
539 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
540 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
541 static void dhd_dump_htsfhisto(histo_t *his, char *s);
542 #endif /* WLMEDIA_HTSF */
543 
544 /* Monitor interface */
545 int dhd_monitor_init(void *dhd_pub);
546 int dhd_monitor_uninit(void);
547 
548 
549 
550 #if defined(WL_WIRELESS_EXT)
551 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
552 #endif /* defined(WL_WIRELESS_EXT) */
553 
554 static void dhd_dpc(ulong data);
555 /* forward decl */
556 extern int dhd_wait_pend8021x(struct net_device *dev);
557 void dhd_os_wd_timer_extend(void *bus, bool extend);
558 
559 #ifdef TOE
560 #ifndef BDC
561 #error TOE requires BDC
562 #endif /* !BDC */
563 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
564 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
565 #endif /* TOE */
566 
567 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
568                              wl_event_msg_t *event_ptr, void **data_ptr);
569 
570 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
571 	KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
dhd_sleep_pm_callback(struct notifier_block * nfb,unsigned long action,void * ignored)572 static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
573 {
574 	int ret = NOTIFY_DONE;
575 
576 	switch (action) {
577 	case PM_HIBERNATION_PREPARE:
578 	case PM_SUSPEND_PREPARE:
579 		dhd_mmc_suspend = TRUE;
580 		ret = NOTIFY_OK;
581 		break;
582 	case PM_POST_HIBERNATION:
583 	case PM_POST_SUSPEND:
584 		dhd_mmc_suspend = FALSE;
585 		ret = NOTIFY_OK;
586 		break;
587 	}
588 	smp_mb();
589 	return ret;
590 }
591 
/* Registered with the kernel PM core; priority 10 runs ahead of
 * default-priority (0) notifiers.
 */
static struct notifier_block dhd_sleep_pm_notifier = {
	.notifier_call = dhd_sleep_pm_callback,
	.priority = 10
};
/* Declared explicitly in case the configured headers do not export them */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
598 #endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP */
599 
600 #if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
601 /* Request scheduling of the bus rx frame */
602 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
603 static void dhd_os_rxflock(dhd_pub_t *pub);
604 static void dhd_os_rxfunlock(dhd_pub_t *pub);
605 
/*
 * Push one received skb into the shared rx ring (dhdp->skbbuf) that the
 * rx frame thread drains.  Returns BCME_OK on success; BCME_ERROR when
 * skb is NULL or the ring slot at the store index is still occupied.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 widx;	/* producer (store) index */
	uint32 ridx;	/* consumer (sent) index, logged for diagnostics */

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	widx = dhdp->store_idx;
	ridx = dhdp->sent_idx;

	if (dhdp->skbbuf[widx] != NULL) {
		/* Ring full: the consumer has not drained this slot yet.
		 * Drop the lock, log, and back off briefly so the rx thread
		 * can make progress (matters on single-processor systems).
		 */
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, widx, ridx));
		msleep(1);
		return BCME_ERROR;
	}

	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, widx, (widx + 1) & (MAXSKBPEND - 1)));
	dhdp->skbbuf[widx] = skb;
	/* Advance the producer index with a power-of-two wrap */
	dhdp->store_idx = (widx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
636 
/*
 * Pop the next queued skb from the shared rx ring, or return NULL (with a
 * diagnostic) when the ring is empty.
 */
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 widx;	/* producer (store) index, logged for diagnostics */
	uint32 ridx;	/* consumer (sent) index */
	void *pkt;

	dhd_os_rxflock(dhdp);
	widx = dhdp->store_idx;
	ridx = dhdp->sent_idx;
	pkt = dhdp->skbbuf[ridx];

	if (pkt == NULL) {
		/* Nothing queued at the consumer slot */
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			widx, ridx));
		return NULL;
	}

	/* Release the slot and advance the consumer index (power-of-two wrap) */
	dhdp->skbbuf[ridx] = NULL;
	dhdp->sent_idx = (ridx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		pkt, ridx));

	dhd_os_rxfunlock(dhdp);

	return pkt;
}
666 #endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
667 
/*
 * Pre/post hook around firmware download: prepost=TRUE reads the platform
 * MAC address beforehand, prepost=FALSE writes it back afterwards.
 * Always returns 0.
 */
static int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	if (prepost)
		dhd_read_macaddr(dhd);		/* pre process */
	else
		dhd_write_macaddr(&dhd->pub.mac);	/* post process */

	return 0;
}
680 
681 #ifdef PKT_FILTER_SUPPORT
682 static bool
_turn_on_arp_filter(dhd_pub_t * dhd,int op_mode)683 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
684 {
685 	bool _apply = FALSE;
686 	/* In case of IBSS mode, apply arp pkt filter */
687 	if (op_mode & DHD_FLAG_IBSS_MODE) {
688 		_apply = TRUE;
689 		goto exit;
690 	}
691 	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
692 	if ((dhd->arp_version == 1) &&
693 		(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
694 		_apply = TRUE;
695 		goto exit;
696 	}
697 exit:
698 	return _apply;
699 }
700 #endif /* PKT_FILTER_SUPPORT */
701 
/*
 * Program every configured packet filter into the dongle.  This only
 * installs the filter definitions; enabling/disabling them is done by
 * dhd_enable_packet_filter().  No-op unless PKT_FILTER_SUPPORT and the
 * global enable flag are set.
 */
void dhd_set_packet_filter(dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int idx;

	DHD_TRACE(("%s: enter\n", __FUNCTION__));

	if (!dhd_pkt_filter_enable)
		return;

	for (idx = 0; idx < dhd->pktfilter_count; idx++)
		dhd_pktfilter_offload_set(dhd, dhd->pktfilter[idx]);
#endif /* PKT_FILTER_SUPPORT */
}
715 
/*
 * Enable (value=1) or disable (value=0) the installed packet filters.
 * Enabling is only permitted in STA mode while no DHCP exchange is in
 * progress; disabling is always allowed.  The ARP allow filter is skipped
 * on enable when the current op_mode requires ARP frames at the host
 * (see _turn_on_arp_filter).
 */
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));

	if (!dhd_pkt_filter_enable)
		return;
	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (value && !(dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))
		return;

	for (i = 0; i < dhd->pktfilter_count; i++) {
		if (value && (i == DHD_ARP_FILTER_NUM) &&
		    !_turn_on_arp_filter(dhd, dhd->op_mode)) {
			DHD_TRACE(("Do not turn on ARP white list pkt filter:"
				"val %d, cnt %d, op_mode 0x%x\n",
				 value, i, dhd->op_mode));
			continue;
		}
		dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
			value, dhd_master_mode);
	}
#endif /* PKT_FILTER_SUPPORT */
}
741 
/*
 * Apply (value != 0, with in_suspend set) or remove (otherwise) the
 * power-save configuration used while the system is suspended: forced max
 * power save, packet filters, DTIM skipping, firmware roam disable and
 * IPv6 RA filtering.  Only acts when the interface is up; serialized by
 * the suspend mutex.  Returns 0, or -ENODEV when dhd is NULL.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
	int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
	/* wl_pkt_filter_enable_t	enable_parm; */
	char iovbuf[32];
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifndef ENABLE_FW_ROAM_SUSPEND
	uint roamvar = 1;
#endif /* ENABLE_FW_ROAM_SUSPEND */
	uint nd_ra_filter = 0;
	int ret = 0;

	if (!dhd)
		return -ENODEV;

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);
	if (dhd->up) {
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
				dhd->early_suspended = 1;
#endif
				/* Kernel suspended */
				DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				                 sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

				/* Enable packet filter, only allow unicast packet to send up */
				dhd_enable_packet_filter(1, dhd);


				/* If DTIM skip is set up as default, force it to wake
				 * each third DTIM for better power savings.  Note that
				 * one side effect is a chance to miss BC/MC packet.
				 */
				bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
					4, iovbuf, sizeof(iovbuf));
				if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
					TRUE, 0) < 0)
					DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

#ifndef ENABLE_FW_ROAM_SUSPEND
				/* Disable firmware roaming during suspend */
				bcm_mkiovar("roam_off", (char *)&roamvar, 4,
					iovbuf, sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
				if (FW_SUPPORTED(dhd, ndoe)) {
					/* enable IPv6 RA filter in  firmware during suspend */
					nd_ra_filter = 1;
					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
							iovbuf, sizeof(iovbuf));
					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
						DHD_ERROR(("failed to set nd_ra_filter (%d)\n", ret));
				}
			} else {
#ifdef PKT_FILTER_SUPPORT
				dhd->early_suspended = 0;
#endif
				/* Kernel resumed  */
				DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
				power_mode = PM_FAST;
				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				                 sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
				/* disable pkt filter */
				dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */

				/* restore pre-suspend setting for dtim_skip */
				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
					4, iovbuf, sizeof(iovbuf));

				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#ifndef ENABLE_FW_ROAM_SUSPEND
				roamvar = dhd_roam_disable;
				bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
					sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
				if (FW_SUPPORTED(dhd, ndoe)) {
					/* disable IPv6 RA filter in  firmware during suspend */
					nd_ra_filter = 0;
					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
							iovbuf, sizeof(iovbuf));
					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
						DHD_ERROR(("failed to set nd_ra_filter (%d)\n", ret));
				}

			}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
848 
dhd_suspend_resume_helper(struct dhd_info * dhd,int val,int force)849 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
850 {
851 	dhd_pub_t *dhdp = &dhd->pub;
852 	int ret = 0;
853 
854 	DHD_OS_WAKE_LOCK(dhdp);
855 	/* Set flag when early suspend was called */
856 	dhdp->in_suspend = val;
857 	if ((force || !dhdp->suspend_disable_flag) &&
858 		dhd_support_sta_mode(dhdp))
859 	{
860 		ret = dhd_set_suspend(val, dhdp);
861 	}
862 
863 	DHD_OS_WAKE_UNLOCK(dhdp);
864 	return ret;
865 }
866 
867 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
dhd_early_suspend(struct early_suspend * h)868 static void dhd_early_suspend(struct early_suspend *h)
869 {
870 	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
871 	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
872 
873 	if (dhd)
874 		dhd_suspend_resume_helper(dhd, 1, 0);
875 }
876 
dhd_late_resume(struct early_suspend * h)877 static void dhd_late_resume(struct early_suspend *h)
878 {
879 	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
880 	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
881 
882 	if (dhd)
883 		dhd_suspend_resume_helper(dhd, 0, 0);
884 }
885 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
886 
887 /*
888  * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
889  * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
890  *
891  *      dhd_timeout_start(&tmo, usec);
892  *      while (!dhd_timeout_expired(&tmo))
893  *              if (poll_something())
894  *                      break;
895  *      if (dhd_timeout_expired(&tmo))
896  *              fatal();
897  */
898 
899 void
dhd_timeout_start(dhd_timeout_t * tmo,uint usec)900 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
901 {
902 	tmo->limit = usec;
903 	tmo->increment = 0;
904 	tmo->elapsed = 0;
905 	tmo->tick = jiffies_to_usecs(1);
906 }
907 
/*
 * Check/advance a timeout started with dhd_timeout_start().  Returns 1 once
 * the accumulated delay reaches the limit, otherwise performs the next delay
 * step and returns 0.  Spins with exponential back-off while the step is
 * below one jiffy, then sleeps via the scheduler.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	if (tmo->increment < tmo->tick) {
		/* Sub-jiffy wait: busy-wait and double the step, capped at one tick */
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* Step has reached a jiffy: sleep for one tick on a private
		 * wait queue instead of burning CPU.
		 */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
941 
942 int
dhd_net2idx(dhd_info_t * dhd,struct net_device * net)943 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
944 {
945 	int i = 0;
946 
947 	ASSERT(dhd);
948 	while (i < DHD_MAX_IFS) {
949 		if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
950 			return i;
951 		i++;
952 	}
953 
954 	return DHD_BAD_IF;
955 }
956 
dhd_idx2net(void * pub,int ifidx)957 struct net_device * dhd_idx2net(void *pub, int ifidx)
958 {
959 	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
960 	struct dhd_info *dhd_info;
961 
962 	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
963 		return NULL;
964 	dhd_info = dhd_pub->info;
965 	if (dhd_info && dhd_info->iflist[ifidx])
966 		return dhd_info->iflist[ifidx]->net;
967 	return NULL;
968 }
969 
970 int
dhd_ifname2idx(dhd_info_t * dhd,char * name)971 dhd_ifname2idx(dhd_info_t *dhd, char *name)
972 {
973 	int i = DHD_MAX_IFS;
974 
975 	ASSERT(dhd);
976 
977 	if (name == NULL || *name == '\0')
978 		return 0;
979 
980 	while (--i > 0)
981 		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
982 				break;
983 
984 	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
985 
986 	return i;	/* default - the primary interface */
987 }
988 
989 char *
dhd_ifname(dhd_pub_t * dhdp,int ifidx)990 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
991 {
992 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
993 
994 	ASSERT(dhd);
995 
996 	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
997 		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
998 		return "<if_bad>";
999 	}
1000 
1001 	if (dhd->iflist[ifidx] == NULL) {
1002 		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1003 		return "<if_null>";
1004 	}
1005 
1006 	if (dhd->iflist[ifidx]->net)
1007 		return dhd->iflist[ifidx]->net->name;
1008 
1009 	return "<if_none>";
1010 }
1011 
1012 uint8 *
dhd_bssidx2bssid(dhd_pub_t * dhdp,int idx)1013 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1014 {
1015 	int i;
1016 	dhd_info_t *dhd = (dhd_info_t *)dhdp;
1017 
1018 	ASSERT(dhd);
1019 	for (i = 0; i < DHD_MAX_IFS; i++)
1020 	if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1021 		return dhd->iflist[i]->mac_addr;
1022 
1023 	return NULL;
1024 }
1025 
1026 
/*
 * Sync interface 'ifidx's Linux multicast/promiscuous configuration
 * down to the dongle.  Three things are pushed in order:
 *   1) the "mcast_list" iovar (explicit multicast filter addresses),
 *   2) the "allmulti" iovar (forced on if the list push fails),
 *   3) WLC_SET_PROMISC derived from IFF_PROMISC.
 * Runs from _dhd_sysioc_thread context, so it may sleep and allocate.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

			ASSERT(dhd && dhd->iflist[ifidx]);
			dev = dhd->iflist[ifidx]->net;
			if (!dev)
				return;
			/* Snapshot the multicast address count under the addr lock */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
			cnt = netdev_mc_count(dev);
#else
			cnt = dev->mc_count;
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_unlock_bh(dev);
#endif

			/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

	/* Send down the multicast list first. */

	/* Buffer layout: "mcast_list\0" | LE32 count | count * 6-byte addrs.
	 * NOTE(review): the mc list can change between the count above and
	 * the copy below; the cnt guard in the copy loop prevents overflow.
	 */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
		           dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	/* Count travels in dongle (little-endian) byte order */
	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
			netdev_for_each_mc_addr(ha, dev) {
				if (!cnt)
					break;
				memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
				bufp += ETHER_ADDR_LEN;
				cnt--;
	}
#else
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
		cnt--, mclist = mclist->next) {
				memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
				bufp += ETHER_ADDR_LEN;
			}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_unlock_bh(dev);
#endif

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* Fall back to allmulti so multicast traffic is not dropped */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting.  This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}


	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	/* WLC_SET_PROMISC takes the raw uint32 directly, no iovar wrapper */
	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
1172 
1173 int
_dhd_set_mac_address(dhd_info_t * dhd,int ifidx,struct ether_addr * addr)1174 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
1175 {
1176 	char buf[32];
1177 	wl_ioctl_t ioc;
1178 	int ret;
1179 
1180 	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
1181 		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
1182 		return -1;
1183 	}
1184 	memset(&ioc, 0, sizeof(ioc));
1185 	ioc.cmd = WLC_SET_VAR;
1186 	ioc.buf = buf;
1187 	ioc.len = 32;
1188 	ioc.set = TRUE;
1189 
1190 	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
1191 	if (ret < 0) {
1192 		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
1193 	} else {
1194 		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
1195 		memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
#ifdef SOFTAP
extern struct net_device *ap_net_dev;	/* wl0.1 netdev, shared with wl_iw.c */
extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
#endif
1205 
/*
 * Service a pending state change (add/delete) on a virtual interface.
 * Called only from _dhd_sysioc_thread; the primary interface (idx 0)
 * is rejected by the !ifp->idx guard.  On failure (ret < 0) the
 * netdev and the interface slot itself are torn down and freed.
 */
static void
dhd_op_if(dhd_if_t *ifp)
{
	dhd_info_t	*dhd;
	int ret = 0, err = 0;
#ifdef SOFTAP
	unsigned long flags;
#endif

	/* Virtual interfaces only: idx 0 (primary) bails out here */
	if (!ifp || !ifp->info || !ifp->idx)
		return;
	ASSERT(ifp && ifp->info && ifp->idx);	/* Virtual interfaces only */
	dhd = ifp->info;

	DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));

#ifdef WL_CFG80211
	/* Don't race with an in-flight cfg80211 interface change */
	if (wl_cfg80211_is_progress_ifchange())
			return;

#endif
	switch (ifp->state) {
	case DHD_IF_ADD:
		/*
		 * Delete the existing interface before overwriting it
		 * in case we missed the WLC_E_IF_DEL event.
		 */
		if (ifp->net != NULL) {
			DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
			 __FUNCTION__, ifp->net->name));
			netif_stop_queue(ifp->net);
			unregister_netdev(ifp->net);
			free_netdev(ifp->net);
		}
		/* Allocate etherdev, including space for private structure */
		if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
			DHD_ERROR(("%s: OOM - alloc_etherdev(%d)\n", __FUNCTION__, sizeof(dhd)));
			ret = -ENOMEM;
		}
		if (ret == 0) {
			strncpy(ifp->net->name, ifp->name, IFNAMSIZ);
			ifp->net->name[IFNAMSIZ - 1] = '\0';
			/* netdev priv area holds a pointer back to dhd_info */
			memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
#ifdef WL_CFG80211
			/* cfg80211 path: cfg80211 completes the attach asynchronously
			 * via the dhd_net_attach callback we hand it here.
			 */
			if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
				if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx,
					(void*)dhd_net_attach)) {
					ifp->state = DHD_IF_NONE;
					ifp->event2cfg80211 = TRUE;
					return;
				}
#endif
			if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
				DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
					__FUNCTION__, err));
				ret = -EOPNOTSUPP;
			} else {
#if defined(SOFTAP)
		if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				 /* semaphore that the soft AP CODE waits on */
				flags = dhd_os_spin_lock(&dhd->pub);

				/* save ptr to wl0.1 netdev for use in wl_iw.c  */
				ap_net_dev = ifp->net;
				 /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
				up(&ap_eth_ctl.sema);
				dhd_os_spin_unlock(&dhd->pub, flags);
		}
#endif
				DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
					current->pid, ifp->net->name));
				ifp->state = DHD_IF_NONE;
			}
		}
		break;
	case DHD_IF_DEL:
		/* Make sure that we don't enter again here if .. */
		/* dhd_op_if is called again from some other context */
		ifp->state = DHD_IF_DELETING;
		if (ifp->net != NULL) {
			DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__));
			netif_stop_queue(ifp->net);
#ifdef WL_CFG80211
			if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
				wl_cfg80211_ifdel_ops(ifp->net);
			}
#endif
			unregister_netdev(ifp->net);
			ret = DHD_DEL_IF;	/* Make sure the free_netdev() is called */
#ifdef WL_CFG80211
			if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
				wl_cfg80211_notify_ifdel();
			}
#endif
		}
		break;
	case DHD_IF_DELETING:
		/* Deletion already in progress elsewhere: nothing to do */
		break;
	default:
		DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
		ASSERT(!ifp->state);
		break;
	}

	/* Error/teardown path: release the netdev and the if slot itself */
	if (ret < 0) {
		ifp->set_multicast = FALSE;
		if (ifp->net) {
			free_netdev(ifp->net);
			ifp->net = NULL;
		}
		dhd->iflist[ifp->idx] = NULL;
#ifdef SOFTAP
		/* NOTE(review): ifp->net was NULLed just above, so this compare
		 * can never match a live ap_net_dev; the intent was presumably
		 * to clear ap_net_dev when its netdev is freed - confirm ordering.
		 */
		flags = dhd_os_spin_lock(&dhd->pub);
		if (ifp->net == ap_net_dev)
			ap_net_dev = NULL;   /*  NULL  SOFTAP global wl0.1 as well */
		dhd_os_spin_unlock(&dhd->pub, flags);
#endif /*  SOFTAP */
		MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
	}
}
1326 
#ifdef DHDTCPACK_SUPPRESS
/* Module knob: suppress redundant TCP ACKs on the tx path (default on) */
uint dhd_use_tcpack_suppress = TRUE;
module_param(dhd_use_tcpack_suppress, uint, FALSE);
extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
#endif /* DHDTCPACK_SUPPRESS */
1332 
1333 static int
_dhd_sysioc_thread(void * data)1334 _dhd_sysioc_thread(void *data)
1335 {
1336 	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
1337 	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
1338 	struct ipv6_addr *iter, *next;
1339 	int i, ret;
1340 #ifdef SOFTAP
1341 	bool in_ap = FALSE;
1342 	unsigned long flags;
1343 #endif
1344 
1345 	while (down_interruptible(&tsk->sema) == 0) {
1346 
1347 		SMP_RD_BARRIER_DEPENDS();
1348 		if (tsk->terminated) {
1349 			break;
1350 		}
1351 
1352 		dhd_net_if_lock_local(dhd);
1353 		DHD_OS_WAKE_LOCK(&dhd->pub);
1354 
1355 		for (i = 0; i < DHD_MAX_IFS; i++) {
1356 			if (dhd->iflist[i]) {
1357 				DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i));
1358 #ifdef SOFTAP
1359 				flags = dhd_os_spin_lock(&dhd->pub);
1360 				in_ap = (ap_net_dev != NULL);
1361 				dhd_os_spin_unlock(&dhd->pub, flags);
1362 #endif /* SOFTAP */
1363 				if (dhd->iflist[i] && dhd->iflist[i]->state)
1364 					dhd_op_if(dhd->iflist[i]);
1365 
1366 				if (dhd->iflist[i] == NULL) {
1367 					DHD_TRACE(("\n\n %s: interface %d just been removed,"
1368 						"!\n\n", __FUNCTION__, i));
1369 					continue;
1370 				}
1371 #ifdef SOFTAP
1372 				if (in_ap && dhd->set_macaddress == i+1)  {
1373 					DHD_TRACE(("attempt to set MAC for %s in AP Mode,"
1374 						"blocked. \n", dhd->iflist[i]->net->name));
1375 					dhd->set_macaddress = 0;
1376 					continue;
1377 				}
1378 
1379 				if (in_ap && dhd->iflist[i]->set_multicast)  {
1380 					DHD_TRACE(("attempt to set MULTICAST list for %s"
1381 					 "in AP Mode, blocked. \n", dhd->iflist[i]->net->name));
1382 					dhd->iflist[i]->set_multicast = FALSE;
1383 					continue;
1384 				}
1385 #endif /* SOFTAP */
1386 				if (dhd->pub.up == 0)
1387 					continue;
1388 				if (dhd->iflist[i]->set_multicast) {
1389 					dhd->iflist[i]->set_multicast = FALSE;
1390 					_dhd_set_multicast_list(dhd, i);
1391 
1392 				}
1393 				list_for_each_entry_safe(iter, next,
1394 					&dhd->iflist[i]->ipv6_list, list) {
1395 					spin_lock_bh(&dhd->iflist[i]->ipv6_lock);
1396 					list_del(&iter->list);
1397 					spin_unlock_bh(&dhd->iflist[i]->ipv6_lock);
1398 					if (iter->ipv6_oper == DHD_IPV6_ADDR_ADD) {
1399 						ret = dhd_ndo_enable(&dhd->pub, TRUE);
1400 						if (ret < 0) {
1401 							DHD_ERROR(("%s: Enabling NDO Failed %d\n",
1402 								__FUNCTION__, ret));
1403 							continue;
1404 						}
1405 						ret = dhd_ndo_add_ip(&dhd->pub,
1406 							(char *)&iter->ipv6_addr[0], i);
1407 						if (ret < 0) {
1408 							DHD_ERROR(("%s: Adding host ip fail %d\n",
1409 								__FUNCTION__, ret));
1410 							continue;
1411 						}
1412 					} else {
1413 						ret = dhd_ndo_remove_ip(&dhd->pub, i);
1414 						if (ret < 0) {
1415 							DHD_ERROR(("%s: Removing host ip fail %d\n",
1416 								__FUNCTION__, ret));
1417 							continue;
1418 						}
1419 					}
1420 					NATIVE_MFREE(dhd->pub.osh, iter, sizeof(struct ipv6_addr));
1421 				}
1422 				if (dhd->set_macaddress == i+1) {
1423 					dhd->set_macaddress = 0;
1424 					if (_dhd_set_mac_address(dhd, i, &dhd->macvalue) == 0) {
1425 						DHD_INFO((
1426 							"%s: MACID is overwritten\n",
1427 							__FUNCTION__));
1428 					} else {
1429 						DHD_ERROR((
1430 							"%s: _dhd_set_mac_address() failed\n",
1431 							__FUNCTION__));
1432 					}
1433 				}
1434 			}
1435 		}
1436 
1437 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
1438 		dhd_net_if_unlock_local(dhd);
1439 	}
1440 	DHD_TRACE(("%s: stopped\n", __FUNCTION__));
1441 	complete_and_exit(&tsk->completed, 0);
1442 }
1443 
1444 static int
dhd_set_mac_address(struct net_device * dev,void * addr)1445 dhd_set_mac_address(struct net_device *dev, void *addr)
1446 {
1447 	int ret = 0;
1448 
1449 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
1450 	struct sockaddr *sa = (struct sockaddr *)addr;
1451 	int ifidx;
1452 
1453 	ifidx = dhd_net2idx(dhd, dev);
1454 	if (ifidx == DHD_BAD_IF)
1455 		return -1;
1456 
1457 	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
1458 	memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
1459 	dhd->set_macaddress = ifidx+1;
1460 	up(&dhd->thr_sysioc_ctl.sema);
1461 
1462 	return ret;
1463 }
1464 
1465 static void
dhd_set_multicast_list(struct net_device * dev)1466 dhd_set_multicast_list(struct net_device *dev)
1467 {
1468 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
1469 	int ifidx;
1470 
1471 	ifidx = dhd_net2idx(dhd, dev);
1472 	if (ifidx == DHD_BAD_IF)
1473 		return;
1474 
1475 	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
1476 	dhd->iflist[ifidx]->set_multicast = TRUE;
1477 	up(&dhd->thr_sysioc_ctl.sema);
1478 }
1479 
1480 #ifdef PROP_TXSTATUS
1481 int
dhd_os_wlfc_block(dhd_pub_t * pub)1482 dhd_os_wlfc_block(dhd_pub_t *pub)
1483 {
1484 	dhd_info_t *di = (dhd_info_t *)(pub->info);
1485 	ASSERT(di != NULL);
1486 	spin_lock_bh(&di->wlfc_spinlock);
1487 	return 1;
1488 }
1489 
1490 int
dhd_os_wlfc_unblock(dhd_pub_t * pub)1491 dhd_os_wlfc_unblock(dhd_pub_t *pub)
1492 {
1493 	dhd_info_t *di = (dhd_info_t *)(pub->info);
1494 
1495 	ASSERT(di != NULL);
1496 	spin_unlock_bh(&di->wlfc_spinlock);
1497 	return 1;
1498 }
1499 
/* tx fifo number -> WME AC (fifos 4/5 map back to AC 1) */
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
/* 802.1D priority -> tx fifo number */
uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
1503 
1504 #endif /* PROP_TXSTATUS */
/*
 * Common transmit path: updates tx stats, sets packet priority, tags
 * the packet for proptxstatus (when active) or pushes the protocol
 * header, then hands the frame to the bus.  Takes ownership of
 * pktbuf: it is always transmitted, queued, or freed here.
 */
int
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		/* Track outstanding EAPOL frames; decremented in dhd_txcomplete */
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
			atomic_inc(&dhd->pend_8021x_cnt);
	} else {
		/* Runt frame: too short to even carry an Ethernet header */
		PKTFREE(dhd->pub.osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

	/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
	if (PKTPRIO(pktbuf) == 0)
#endif
		pktsetprio(pktbuf, FALSE);

#ifdef PROP_TXSTATUS
	if (dhdp->wlfc_state) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	/* If the protocol uses a data header, apply it */
	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);

	/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
	dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#ifdef DHDTCPACK_SUPPRESS
	/* A suppressed ACK is consumed by dhd_tcpack_suppress() itself */
	if (dhd_use_tcpack_suppress && dhd_tcpack_suppress(dhdp, pktbuf))
		ret = BCME_OK;
	else
#endif /* DHDTCPACK_SUPPRESS */
#ifdef PROP_TXSTATUS
	dhd_os_wlfc_block(dhdp);
	if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode
		!= WLFC_FCMODE_NONE) {
		dhd_wlfc_commit_packets(dhdp->wlfc_state,  (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf);
		if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) {
			((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0;
		}
		dhd_os_wlfc_unblock(dhdp);
	}
	else {
		dhd_os_wlfc_unblock(dhdp);
		/* non-proptxstatus way */
		ret = dhd_bus_txdata(dhdp->bus, pktbuf);
	}
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* PROP_TXSTATUS */

	return ret;
}
1589 
1590 int
dhd_start_xmit(struct sk_buff * skb,struct net_device * net)1591 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
1592 {
1593 	int ret;
1594 	uint datalen;
1595 	void *pktbuf;
1596 	dhd_info_t *dhd  =  *(dhd_info_t **)netdev_priv(net);
1597 	dhd_if_t *ifp = NULL;
1598 	int ifidx;
1599 #ifdef WLMEDIA_HTSF
1600 	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
1601 #else
1602 	uint8 htsfdlystat_sz = 0;
1603 #endif
1604 
1605 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1606 
1607 	DHD_OS_WAKE_LOCK(&dhd->pub);
1608 
1609 	/* Reject if down */
1610 	if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
1611 		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
1612 			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
1613 		netif_stop_queue(net);
1614 		/* Send Event when bus down detected during data session */
1615 		if (dhd->pub.up) {
1616 			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
1617 			net_os_send_hang_message(net);
1618 		}
1619 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
1620 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
1621 		return -ENODEV;
1622 #else
1623 		return NETDEV_TX_BUSY;
1624 #endif
1625 	}
1626 
1627 	ifidx = dhd_net2idx(dhd, net);
1628 	if (ifidx == DHD_BAD_IF) {
1629 		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
1630 		netif_stop_queue(net);
1631 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
1632 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
1633 		return -ENODEV;
1634 #else
1635 		return NETDEV_TX_BUSY;
1636 #endif
1637 	}
1638 
1639 	ifp = dhd->iflist[ifidx];
1640 	datalen  = PKTLEN(dhdp->osh, skb);
1641 
1642 	/* Make sure there's enough room for any header */
1643 
1644 	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
1645 		struct sk_buff *skb2;
1646 
1647 		DHD_INFO(("%s: insufficient headroom\n",
1648 		          dhd_ifname(&dhd->pub, ifidx)));
1649 		dhd->pub.tx_realloc++;
1650 
1651 		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
1652 
1653 		dev_kfree_skb(skb);
1654 		if ((skb = skb2) == NULL) {
1655 			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
1656 			           dhd_ifname(&dhd->pub, ifidx)));
1657 			ret = -ENOMEM;
1658 			goto done;
1659 		}
1660 	}
1661 
1662 	/* Convert to packet */
1663 	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
1664 		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
1665 		           dhd_ifname(&dhd->pub, ifidx)));
1666 		dev_kfree_skb_any(skb);
1667 		ret = -ENOMEM;
1668 		goto done;
1669 	}
1670 #ifdef WLMEDIA_HTSF
1671 	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
1672 		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
1673 		struct ether_header *eh = (struct ether_header *)pktdata;
1674 
1675 		if (!ETHER_ISMULTI(eh->ether_dhost) &&
1676 			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
1677 			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
1678 		}
1679 	}
1680 #endif
1681 
1682 	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
1683 
1684 done:
1685 	if (ret) {
1686 			ifp->stats.tx_dropped++;
1687 	}
1688 	else {
1689 			dhd->pub.tx_packets++;
1690 			ifp->stats.tx_packets++;
1691 			ifp->stats.tx_bytes += datalen;
1692 	}
1693 
1694 	DHD_OS_WAKE_UNLOCK(&dhd->pub);
1695 
1696 	/* Return ok: we always eat the packet */
1697 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
1698 	return 0;
1699 #else
1700 	return NETDEV_TX_OK;
1701 #endif
1702 }
1703 
1704 void
dhd_txflowcontrol(dhd_pub_t * dhdp,int ifidx,bool state)1705 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
1706 {
1707 	struct net_device *net;
1708 	dhd_info_t *dhd = dhdp->info;
1709 	int i;
1710 
1711 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1712 
1713 	ASSERT(dhd);
1714 
1715 	if (ifidx == ALL_INTERFACES) {
1716 		/* Flow control on all active interfaces */
1717 		dhdp->txoff = state;
1718 		for (i = 0; i < DHD_MAX_IFS; i++) {
1719 			if (dhd->iflist[i]) {
1720 				net = dhd->iflist[i]->net;
1721 				if (state == ON)
1722 					netif_stop_queue(net);
1723 				else
1724 					netif_wake_queue(net);
1725 			}
1726 		}
1727 	}
1728 	else {
1729 		if (dhd->iflist[ifidx]) {
1730 			net = dhd->iflist[ifidx]->net;
1731 			if (state == ON)
1732 				netif_stop_queue(net);
1733 			else
1734 				netif_wake_queue(net);
1735 		}
1736 	}
1737 }
1738 
1739 #ifdef DHD_RX_DUMP
1740 typedef struct {
1741 	uint16 type;
1742 	const char *str;
1743 } PKTTYPE_INFO;
1744 
1745 static const PKTTYPE_INFO packet_type_info[] =
1746 {
1747 	{ ETHER_TYPE_IP, "IP" },
1748 	{ ETHER_TYPE_ARP, "ARP" },
1749 	{ ETHER_TYPE_BRCM, "BRCM" },
1750 	{ ETHER_TYPE_802_1X, "802.1X" },
1751 	{ ETHER_TYPE_WAI, "WAPI" },
1752 	{ 0, ""}
1753 };
1754 
_get_packet_type_str(uint16 type)1755 static const char *_get_packet_type_str(uint16 type)
1756 {
1757 	int i;
1758 	int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
1759 
1760 	for (i = 0; i < n; i++) {
1761 		if (packet_type_info[i].type == type)
1762 			return packet_type_info[i].str;
1763 	}
1764 
1765 	return packet_type_info[n].str;
1766 }
1767 #endif /* DHD_RX_DUMP */
1768 
1769 void
dhd_rx_frame(dhd_pub_t * dhdp,int ifidx,void * pktbuf,int numpkt,uint8 chan)1770 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
1771 {
1772 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1773 	struct sk_buff *skb;
1774 	uchar *eth;
1775 	uint len;
1776 	void *data, *pnext = NULL;
1777 	int i;
1778 	dhd_if_t *ifp;
1779 	wl_event_msg_t event;
1780 	int tout_rx = 0;
1781 	int tout_ctrl = 0;
1782 #if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
1783 	void *skbhead = NULL;
1784 	void *skbprev = NULL;
1785 #endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
1786 #ifdef DHD_RX_DUMP
1787 #ifdef DHD_RX_FULL_DUMP
1788 	int k;
1789 #endif /* DHD_RX_FULL_DUMP */
1790 	char *dump_data;
1791 	uint16 protocol;
1792 #endif /* DHD_RX_DUMP */
1793 
1794 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1795 
1796 	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
1797 
1798 		pnext = PKTNEXT(dhdp->osh, pktbuf);
1799 		PKTSETNEXT(wl->sh.osh, pktbuf, NULL);
1800 
1801 		ifp = dhd->iflist[ifidx];
1802 		if (ifp == NULL) {
1803 			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
1804 				__FUNCTION__));
1805 			PKTFREE(dhdp->osh, pktbuf, TRUE);
1806 			continue;
1807 		}
1808 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
1809 		/* Dropping packets before registering net device to avoid kernel panic */
1810 #ifndef PROP_TXSTATUS_VSDB
1811 		if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) {
1812 #else
1813 		if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) {
1814 #endif /* PROP_TXSTATUS_VSDB */
1815 			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
1816 			__FUNCTION__));
1817 			PKTFREE(dhdp->osh, pktbuf, TRUE);
1818 			continue;
1819 		}
1820 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
1821 
1822 
1823 #ifdef PROP_TXSTATUS
1824 		if (dhdp->wlfc_state && PKTLEN(wl->sh.osh, pktbuf) == 0) {
1825 			/* WLFC may send header only packet when
1826 			there is an urgent message but no packet to
1827 			piggy-back on
1828 			*/
1829 			((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++;
1830 			PKTFREE(dhdp->osh, pktbuf, TRUE);
1831 			continue;
1832 		}
1833 #endif
1834 
1835 		skb = PKTTONATIVE(dhdp->osh, pktbuf);
1836 
1837 		/* Get the protocol, maintain skb around eth_type_trans()
1838 		 * The main reason for this hack is for the limitation of
1839 		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
1840 		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
1841 		 * coping of the packet coming from the network stack to add
1842 		 * BDC, Hardware header etc, during network interface registration
1843 		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
1844 		 * for BDC, Hardware header etc. and not just the ETH_HLEN
1845 		 */
1846 		eth = skb->data;
1847 		len = skb->len;
1848 
1849 #ifdef DHD_RX_DUMP
1850 		dump_data = skb->data;
1851 		protocol = (dump_data[12] << 8) | dump_data[13];
1852 		DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
1853 
1854 #ifdef DHD_RX_FULL_DUMP
1855 		if (protocol != ETHER_TYPE_BRCM) {
1856 			for (k = 0; k < skb->len; k++) {
1857 				DHD_ERROR(("%02X ", dump_data[k]));
1858 				if ((k & 15) == 15)
1859 					DHD_ERROR(("\n"));
1860 			}
1861 			DHD_ERROR(("\n"));
1862 		}
1863 #endif /* DHD_RX_FULL_DUMP */
1864 
1865 		if (protocol != ETHER_TYPE_BRCM) {
1866 			if (dump_data[0] == 0xFF) {
1867 				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
1868 
1869 				if ((dump_data[12] == 8) &&
1870 					(dump_data[13] == 6)) {
1871 					DHD_ERROR(("%s: ARP %d\n",
1872 						__FUNCTION__, dump_data[0x15]));
1873 				}
1874 			} else if (dump_data[0] & 1) {
1875 				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
1876 					__FUNCTION__, MAC2STRDBG(dump_data)));
1877 			}
1878 
1879 			if (protocol == ETHER_TYPE_802_1X) {
1880 				DHD_ERROR(("ETHER_TYPE_802_1X: "
1881 					"ver %d, type %d, replay %d\n",
1882 					dump_data[14], dump_data[15],
1883 					dump_data[30]));
1884 			}
1885 		}
1886 
1887 #endif /* DHD_RX_DUMP */
1888 
1889 		ifp = dhd->iflist[ifidx];
1890 		if (ifp == NULL)
1891 			ifp = dhd->iflist[0];
1892 
1893 		ASSERT(ifp);
1894 		skb->dev = ifp->net;
1895 		skb->protocol = eth_type_trans(skb, skb->dev);
1896 
1897 		if (skb->pkt_type == PACKET_MULTICAST) {
1898 			dhd->pub.rx_multicast++;
1899 		}
1900 
1901 		skb->data = eth;
1902 		skb->len = len;
1903 
1904 #ifdef WLMEDIA_HTSF
1905 		dhd_htsf_addrxts(dhdp, pktbuf);
1906 #endif
1907 		/* Strip header, count, deliver upward */
1908 		skb_pull(skb, ETH_HLEN);
1909 
1910 		/* Process special event packets and then discard them */
1911 		memset(&event, 0, sizeof(event));
1912 		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
1913 			dhd_wl_host_event(dhd, &ifidx,
1914 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1915 			skb_mac_header(skb),
1916 #else
1917 			skb->mac.raw,
1918 #endif
1919 			&event,
1920 			&data);
1921 
1922 			wl_event_to_host_order(&event);
1923 			if (!tout_ctrl)
1924 				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
1925 
1926 #if defined(PNO_SUPPORT)
1927 			if (event.event_type == WLC_E_PFN_NET_FOUND) {
1928 				/* enforce custom wake lock to garantee that Kernel not suspended */
1929 				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
1930 			}
1931 #endif /* PNO_SUPPORT */
1932 
1933 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
1934 			PKTFREE(dhdp->osh, pktbuf, TRUE);
1935 			continue;
1936 #endif
1937 		} else {
1938 			tout_rx = DHD_PACKET_TIMEOUT_MS;
1939 		}
1940 
1941 		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
1942 		if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
1943 			ifp = dhd->iflist[ifidx];
1944 
1945 		if (ifp->net)
1946 			ifp->net->last_rx = jiffies;
1947 
1948 		dhdp->dstats.rx_bytes += skb->len;
1949 		dhdp->rx_packets++; /* Local count */
1950 		ifp->stats.rx_bytes += skb->len;
1951 		ifp->stats.rx_packets++;
1952 
1953 		if (in_interrupt()) {
1954 			netif_rx(skb);
1955 		} else {
1956 			/* If the receive is not processed inside an ISR,
1957 			 * the softirqd must be woken explicitly to service
1958 			 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
1959 			 * by netif_rx_ni(), but in earlier kernels, we need
1960 			 * to do it manually.
1961 			 */
1962 #if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
1963 			if (!skbhead)
1964 				skbhead = skb;
1965 			else
1966 				PKTSETNEXT(wl->sh.osh, skbprev, skb);
1967 			skbprev = skb;
1968 #else
1969 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
1970 			netif_rx_ni(skb);
1971 #else
1972 			ulong flags;
1973 			netif_rx(skb);
1974 			local_irq_save(flags);
1975 			RAISE_RX_SOFTIRQ();
1976 			local_irq_restore(flags);
1977 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
1978 #endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
1979 		}
1980 	}
1981 #if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
1982 	if (skbhead)
1983 		dhd_sched_rxf(dhdp, skbhead);
1984 #endif
1985 	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
1986 	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
1987 }
1988 
/*
 * Per-OS event hook.  The Linux port processes events in the rx path
 * (dhd_rx_frame -> dhd_wl_host_event), so this is intentionally a no-op.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Linux version has nothing to do */
}
1995 
1996 void
1997 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
1998 {
1999 	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2000 	struct ether_header *eh;
2001 	uint16 type;
2002 
2003 	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
2004 
2005 	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
2006 	type  = ntoh16(eh->ether_type);
2007 
2008 	if (type == ETHER_TYPE_802_1X)
2009 		atomic_dec(&dhd->pend_8021x_cnt);
2010 
2011 }
2012 
2013 static struct net_device_stats *
2014 dhd_get_stats(struct net_device *net)
2015 {
2016 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
2017 	dhd_if_t *ifp;
2018 	int ifidx;
2019 
2020 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2021 
2022 	ifidx = dhd_net2idx(dhd, net);
2023 	if (ifidx == DHD_BAD_IF) {
2024 		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
2025 		return NULL;
2026 	}
2027 
2028 	ifp = dhd->iflist[ifidx];
2029 	ASSERT(dhd && ifp);
2030 
2031 	if (dhd->pub.up) {
2032 		/* Use the protocol to get dongle stats */
2033 		dhd_prot_dstats(&dhd->pub);
2034 	}
2035 
2036 	/* Copy dongle stats to net device stats */
2037 	ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
2038 	ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
2039 	ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
2040 	ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
2041 	ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
2042 	ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
2043 	ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
2044 	ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
2045 	ifp->stats.multicast = dhd->pub.dstats.multicast;
2046 
2047 	return &ifp->stats;
2048 }
2049 
2050 #ifdef DHDTHREAD
/* Dedicated watchdog kernel thread (DHDTHREAD builds). Each up() of
 * tsk->sema from dhd_watchdog() triggers one watchdog pass; the thread
 * exits when tsk->terminated is set or the down is interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1)
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			/* Re-read terminated after the semaphore wakeup. */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			dhd_os_sdlock(&dhd->pub);
			/* Skip the pass entirely while the dongle is held in reset. */
			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));

				/* Call the bus module watchdog */
				dhd_bus_watchdog(&dhd->pub);

				flags = dhd_os_spin_lock(&dhd->pub);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog; subtract the time this pass took
				 * (capped at the full period) so ticks stay on cadence.
				 */
				if (dhd->wd_timer_valid)
					mod_timer(&dhd->timer,
					jiffies +
					msecs_to_jiffies(dhd_watchdog_ms) -
					min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				dhd_os_spin_unlock(&dhd->pub, flags);
			}
			dhd_os_sdunlock(&dhd->pub);
		} else {
			break;
	}

	complete_and_exit(&tsk->completed, 0);
}
2104 #endif /* DHDTHREAD */
2105 
/* Watchdog timer callback. In DHDTHREAD builds it only kicks the
 * watchdog thread; otherwise it runs the watchdog pass inline
 * (same sequence as dhd_watchdog_thread: sdlock -> bus watchdog ->
 * spinlocked tick/reschedule -> sdunlock).
 */
static void dhd_watchdog(ulong data)
{
	dhd_info_t *dhd = (dhd_info_t *)data;
	unsigned long flags;

	/* Nothing to do while the dongle is held in reset. */
	if (dhd->pub.dongle_reset) {
		return;
	}

#ifdef DHDTHREAD
	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
		/* Hand the work to the watchdog thread. */
		up(&dhd->thr_wdt_ctl.sema);
		return;
	}
#endif /* DHDTHREAD */

	dhd_os_sdlock(&dhd->pub);
	/* Call the bus module watchdog */
	dhd_bus_watchdog(&dhd->pub);

	flags = dhd_os_spin_lock(&dhd->pub);
	/* Count the tick for reference */
	dhd->pub.tickcnt++;

	/* Reschedule the watchdog */
	if (dhd->wd_timer_valid)
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
	dhd_os_spin_unlock(&dhd->pub, flags);
	dhd_os_sdunlock(&dhd->pub);
}
2136 
2137 #ifdef DHDTHREAD
/* Dedicated DPC kernel thread (DHDTHREAD builds). Woken via
 * binary_sema_up() from dhd_sched_dpc(); drains the bus DPC and then
 * releases the wake lock that dhd_sched_dpc() took on our behalf.
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_DPC_CPUCORE
	/* Pin DPC processing to the configured CPU core. */
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#endif /* CUSTOM_DPC_CPUCORE */

	/* Run until signal received */
	while (1) {
		if (!binary_sema_down(tsk)) {

			/* Re-read terminated after the wakeup. */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
				/* Hold off watchdog rescheduling while DPC runs. */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Balance the wake lock taken in dhd_sched_dpc(). */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			} else {
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		}
		else
			break;
	}

	complete_and_exit(&tsk->completed, 0);
}
2187 
2188 #ifdef RXFRAME_THREAD
/* RX-frame delivery thread (DHDTHREAD + RXFRAME_THREAD builds).
 * Dequeues skb chains posted by dhd_sched_rxf(), hands each skb to the
 * network stack, then releases the wake lock taken by the scheduler.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */

	/*  signal: thread has started */
	complete(&tsk->completed);

	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif

			/* Re-read terminated after the wakeup. */
			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Walk the skb chain, delivering each packet individually. */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				/* Pre-2.6 kernels need the RX softirq raised manually. */
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}

			/* Balance the wake lock taken in dhd_sched_rxf(). */
			DHD_OS_WAKE_UNLOCK(pub);
		}
		else
			break;
	}

	complete_and_exit(&tsk->completed, 0);
}
2254 #endif /* RXFRAME_THREAD */
2255 #endif /* DHDTHREAD */
2256 
/* DPC tasklet body (non-thread path). Runs one bus DPC pass; if more
 * work remains it reschedules itself and keeps the wake lock, otherwise
 * it releases the wake lock taken in dhd_sched_dpc().
 */
static void
dhd_dpc(ulong data)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)data;

	/* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
	 * down below , wake lock is set,
	 * the tasklet is initialized in dhd_attach()
	 */
	/* Call bus dpc unless it indicated down (then clean stop) */
	if (dhd->pub.busstate != DHD_BUS_DOWN) {
		if (dhd_bus_dpc(dhd->pub.bus))
			tasklet_schedule(&dhd->tasklet);
		else
			DHD_OS_WAKE_UNLOCK(&dhd->pub);
	} else {
		dhd_bus_stop(dhd->pub.bus, TRUE);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
	}
}
2279 
/* Schedule DPC processing. Takes a wake lock that is released by the
 * DPC thread or tasklet once it finishes (or immediately below if the
 * binary semaphore was already up, i.e. a run is already pending).
 */
void
dhd_sched_dpc(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	DHD_OS_WAKE_LOCK(dhdp);
#ifdef DHDTHREAD
	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
		/* A pending wakeup already holds a wake lock; drop this one. */
		if (!binary_sema_up(&dhd->thr_dpc_ctl))
			DHD_OS_WAKE_UNLOCK(dhdp);
		return;
	}
#endif /* DHDTHREAD */

	if (dhd->dhd_tasklet_create)
		tasklet_schedule(&dhd->tasklet);
}
2297 
2298 #if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
/* Queue an skb chain for the RX-frame thread and wake it. The wake lock
 * taken here is released by dhd_rxf_thread() after delivery.
 */
static void
dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	DHD_OS_WAKE_LOCK(dhdp);

	DHD_TRACE(("dhd_sched_rxf: Enter\n"));

	/* NOTE(review): this busy-spins until the RX queue has room; if the
	 * rxf thread is starved this loop burns CPU -- consider backoff.
	 */
	do {
		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
			break;
	} while (1);
	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
		up(&dhd->thr_rxf_ctl.sema);
	}
	return;
}
2317 #endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
2318 
2319 #ifdef TOE
2320 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
2321 static int
2322 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
2323 {
2324 	wl_ioctl_t ioc;
2325 	char buf[32];
2326 	int ret;
2327 
2328 	memset(&ioc, 0, sizeof(ioc));
2329 
2330 	ioc.cmd = WLC_GET_VAR;
2331 	ioc.buf = buf;
2332 	ioc.len = (uint)sizeof(buf);
2333 	ioc.set = FALSE;
2334 
2335 	strncpy(buf, "toe_ol", sizeof(buf) - 1);
2336 	buf[sizeof(buf) - 1] = '\0';
2337 	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
2338 		/* Check for older dongle image that doesn't support toe_ol */
2339 		if (ret == -EIO) {
2340 			DHD_ERROR(("%s: toe not supported by device\n",
2341 				dhd_ifname(&dhd->pub, ifidx)));
2342 			return -EOPNOTSUPP;
2343 		}
2344 
2345 		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
2346 		return ret;
2347 	}
2348 
2349 	memcpy(toe_ol, buf, sizeof(uint32));
2350 	return 0;
2351 }
2352 
2353 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
2354 static int
2355 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
2356 {
2357 	wl_ioctl_t ioc;
2358 	char buf[32];
2359 	int toe, ret;
2360 
2361 	memset(&ioc, 0, sizeof(ioc));
2362 
2363 	ioc.cmd = WLC_SET_VAR;
2364 	ioc.buf = buf;
2365 	ioc.len = (uint)sizeof(buf);
2366 	ioc.set = TRUE;
2367 
2368 	/* Set toe_ol as requested */
2369 
2370 	strncpy(buf, "toe_ol", sizeof(buf) - 1);
2371 	buf[sizeof(buf) - 1] = '\0';
2372 	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
2373 
2374 	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
2375 		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
2376 			dhd_ifname(&dhd->pub, ifidx), ret));
2377 		return ret;
2378 	}
2379 
2380 	/* Enable toe globally only if any components are enabled. */
2381 
2382 	toe = (toe_ol != 0);
2383 
2384 	strcpy(buf, "toe");
2385 	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
2386 
2387 	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
2388 		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
2389 		return ret;
2390 	}
2391 
2392 	return 0;
2393 }
2394 #endif /* TOE */
2395 
2396 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
2397 static void
2398 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
2399 {
2400 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
2401 
2402 	snprintf(info->driver, sizeof(info->driver), "wl");
2403 	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
2404 }
2405 
/* Minimal ethtool vtable: only drvinfo is exposed via the ops table;
 * other ethtool commands go through dhd_ethtool() from the ioctl path.
 */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
2409 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
2410 
2411 
2412 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/* Handle SIOCETHTOOL requests arriving through dhd_ioctl_entry().
 * 'uaddr' is a userspace pointer whose first word is the ethtool cmd.
 * Supports GDRVINFO always and RX/TX checksum get/set when TOE is built.
 * Returns 0 or a negative errno.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
2522 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
2523 
2524 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
2525 {
2526 	dhd_info_t *dhd;
2527 
2528 	if (!dhdp) {
2529 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
2530 		return FALSE;
2531 	}
2532 
2533 	if (!dhdp->up)
2534 		return FALSE;
2535 
2536 	dhd = (dhd_info_t *)dhdp->info;
2537 	if (dhd->thr_sysioc_ctl.thr_pid < 0) {
2538 		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
2539 		return FALSE;
2540 	}
2541 
2542 	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
2543 		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
2544 		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
2545 			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
2546 		net_os_send_hang_message(net);
2547 		return TRUE;
2548 	}
2549 	return FALSE;
2550 }
2551 
/* Core ioctl dispatcher: copies in the user buffer, routes DHD-local
 * ioctls, serializes key/disassoc commands against pending 802.1X
 * frames, then forwards to the dongle. Always runs the hang check and
 * copies results back on success. Returns a BCME_* code (or -EFAULT).
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	void *buf = NULL;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	/* Copy out any buffer passed */
	if (ioc->buf) {
		if (ioc->len == 0) {
			DHD_TRACE(("%s: ioc->len=0, returns BCME_BADARG \n", __FUNCTION__));
			bcmerror = BCME_BADARG;
			goto done;
		}
		/* Cap the kernel copy at the driver's ioctl maximum. */
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
		/* optimization for direct ioctl calls from kernel */
		/*
		if (segment_eq(get_fs(), KERNEL_DS)) {
			buf = ioc->buf;
		} else {
		*/
		{
			/* +1 byte so the buffer can always be NUL-terminated below. */
			if (!(buf = MALLOC(pub->osh, buflen + 1))) {
				bcmerror = BCME_NOMEM;
				goto done;
			}
			if (copy_from_user(buf, ioc->buf, buflen)) {
				bcmerror = BCME_BADADDR;
				goto done;
			}
			/* NOTE(review): arithmetic on void* is a GCC extension. */
			*(char *)(buf + buflen) = '\0';
		}
	}

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate != DHD_BUS_DATA) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && ioc->buf != NULL &&
	     strncmp("wsec_key", ioc->buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && ioc->buf != NULL &&
	     strncmp("bsscfg:wsec_key", ioc->buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	/* Timestamping debug shortcuts handled entirely in the host. */
	if (ioc->buf) {
		/*  short cut wl ioctl calls here  */
		if (strcmp("htsf", ioc->buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", ioc->buf) == 0) {
			if (ioc->set) {
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", ioc->buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", ioc->buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", ioc->buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, ioc->buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* "rpc_" iovars are only valid when FD aggregation is built in. */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		ioc->buf != NULL && strncmp("rpc_", ioc->buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}
	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, buf, buflen);

done:
	dhd_check_hang(net, pub, bcmerror);

	/* Copy results back to the caller only on success. */
	if (!bcmerror && buf && ioc->buf) {
		if (copy_to_user(ioc->buf, buf, buflen))
			bcmerror = -EFAULT;
	}

	if (buf)
		MFREE(pub->osh, buf, buflen + 1);

	return bcmerror;
}
2700 
/* .ndo_do_ioctl entry point. Routes wireless-extensions, ethtool, and
 * Android private commands, then handles SIOCDEVPRIVATE via
 * dhd_ioctl_process(). Holds a wake lock for the duration; every exit
 * path must release it.
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
	dhd_ioctl_t ioc;
	int bcmerror = 0;
	int ifidx;
	int ret;

	DHD_OS_WAKE_LOCK(&dhd->pub);

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return OSL_ERROR(BCME_DONGLE_DOWN);
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -1;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	/* Android private command channel (wl_android). */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}

	if (cmd != SIOCDEVPRIVATE) {
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -EOPNOTSUPP;
	}

	memset(&ioc, 0, sizeof(ioc));

	/* Copy the ioc control structure part of ioctl request */
	if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
		bcmerror = BCME_BADADDR;
		goto done;
	}

	/* To differentiate between wl and dhd read 4 more byes */
	if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
		sizeof(uint)) != 0)) {
		bcmerror = BCME_BADADDR;
		goto done;
	}

	/* Privileged operation: require CAP_NET_ADMIN. */
	if (!capable(CAP_NET_ADMIN)) {
		bcmerror = BCME_EPERM;
		goto done;
	}

	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc);

done:
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(bcmerror);
}
2786 
2787 #ifdef WL_CFG80211
/* Tear down all virtual interfaces (ifidx 1..DHD_MAX_IFS-1), leaving
 * the primary interface alone. Temporarily drops the rtnl lock if held,
 * since interface deletion ends in unregister_netdev().
 */
static int
dhd_cleanup_virt_ifaces(dhd_info_t *dhd)
{
	int i = 1; /* Leave ifidx 0 [Primary Interface] */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int rollback_lock = FALSE;
#endif

	DHD_TRACE(("%s: Enter \n", __func__));

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	/* release lock for unregister_netdev */
	if (rtnl_is_locked()) {
		rtnl_unlock();
		rollback_lock = TRUE;
	}
#endif

	for (i = 1; i < DHD_MAX_IFS; i++) {
		dhd_net_if_lock_local(dhd);
		if (dhd->iflist[i]) {
			DHD_TRACE(("Deleting IF: %d \n", i));
			/* Only delete interfaces not already in (or entering) deletion. */
			if ((dhd->iflist[i]->state != DHD_IF_DEL) &&
				(dhd->iflist[i]->state != DHD_IF_DELETING)) {
				dhd->iflist[i]->state = DHD_IF_DEL;
				dhd->iflist[i]->idx = i;
				dhd_op_if(dhd->iflist[i]);
			}
		}
		dhd_net_if_unlock_local(dhd);
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	/* Re-acquire rtnl if we dropped it above. */
	if (rollback_lock)
		rtnl_lock();
#endif

	return 0;
}
2827 #endif /* WL_CFG80211 */
2828 
2829 
/* .ndo_stop: bring the interface down. Stops the TX queue, tears down
 * cfg80211 state and leftover virtual interfaces (primary interface
 * only), flushes wlfc, and stops the protocol module. Always returns 0.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));

	/* Already down: skip straight to the wifi-off handling. */
	if (dhd->pub.up == 0) {
		goto exit;
	}
	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	if (ifidx == 0) {
		wl_cfg80211_down(NULL);

		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				dhd_cleanup_virt_ifaces(dhd);
			}
		}
	}
#endif

#ifdef PROP_TXSTATUS
	/* Flush proptxstatus (wlfc) state under its own lock. */
	dhd_os_wlfc_block(&dhd->pub);
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
	dhd_os_wlfc_unblock(&dhd->pub);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
#if defined(WL_CFG80211)
	/* Power the chip off when firmware is loaded on demand. */
	if (ifidx == 0 && !dhd_download_fw_on_driverload)
		wl_android_wifi_off(net);
#endif
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	return 0;
}
2885 
/* .ndo_open: bring the interface up. Updates the firmware path from
 * module params, powers on wifi / starts the bus for the primary
 * interface, syncs the MAC address and TOE features, and brings up
 * cfg80211. Calls dhd_stop() itself on failure. Returns 0 or -1.
 */
static int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
#ifdef TOE
	uint32 toe_ol;
#endif
	int ifidx;
	int32 ret = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	/* Update FW path if it was changed */
	if (strlen(firmware_path) != 0) {
		/* Strip a trailing newline left by echo into the module param. */
		if (firmware_path[strlen(firmware_path)-1] == '\n')
			firmware_path[strlen(firmware_path)-1] = '\0';
		bzero(fw_path, MOD_PARAM_PATHLEN);
		strncpy(fw_path, firmware_path, sizeof(fw_path)-1);
		firmware_path[0] = '\0';
	}



	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;
#if !defined(WL_CFG80211)
	/*
	 * Force start if ifconfig_up gets called before START command
	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
	 *  This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}

#endif

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	/* Primary-interface-only bring-up work. */
	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
#if defined(WL_CFG80211)
		DHD_ERROR(("\n%s\n", dhd_version));
		/* On-demand firmware: power the chip and download now. */
		if (!dhd_download_fw_on_driverload) {
			ret = wl_android_wifi_on(net);
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}
#endif

		if (dhd->pub.busstate != DHD_BUS_DATA) {

			/* try to bring up bus */
			if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}

		}

		/* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		else
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
#endif /* TOE */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(NULL))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

#ifdef BCMDBGFS
	dhd_dbg_init(&dhd->pub);
#endif

	OLD_MOD_INC_USE_COUNT;
exit:
	/* Roll back everything done above on any failure. */
	if (ret)
		dhd_stop(net);

	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	return ret;
}
3002 
3003 int dhd_do_driver_init(struct net_device *net)
3004 {
3005 	dhd_info_t *dhd = NULL;
3006 
3007 	if (!net) {
3008 		DHD_ERROR(("Primary Interface not initialized \n"));
3009 		return -EINVAL;
3010 	}
3011 
3012 	dhd = *(dhd_info_t **)netdev_priv(net);
3013 
3014 	/* If driver is already initialized, do nothing
3015 	 */
3016 	if (dhd->pub.busstate == DHD_BUS_DATA) {
3017 		DHD_TRACE(("Driver already Inititalized. Nothing to do"));
3018 		return 0;
3019 	}
3020 
3021 	if (dhd_open(net) < 0) {
3022 		DHD_ERROR(("Driver Init Failed \n"));
3023 		return -1;
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 osl_t *
3030 dhd_osl_attach(void *pdev, uint bustype)
3031 {
3032 	return osl_attach(pdev, bustype, TRUE);
3033 }
3034 
/* Destroy the OSL handle, warning about any memory still tracked as
 * allocated (leak detection), and release the registration semaphores
 * so module load/unload waiters can proceed.
 */
void
dhd_osl_detach(osl_t *osh)
{
	if (MALLOCED(osh)) {
		DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
	}
	osl_detach(osh);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	dhd_registration_check = FALSE;
	up(&dhd_registration_sem);
#if	defined(BCMLXSDMMC)
	up(&dhd_chipup_sem);
#endif
#endif
}
3050 
3051 int
3052 dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
3053 	uint8 *mac_addr, uint32 flags, uint8 bssidx)
3054 {
3055 	dhd_if_t *ifp;
3056 
3057 	DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
3058 
3059 	ASSERT(dhd && (ifidx < DHD_MAX_IFS));
3060 
3061 	ifp = dhd->iflist[ifidx];
3062 	if (ifp != NULL) {
3063 		if (ifp->net != NULL) {
3064 			netif_stop_queue(ifp->net);
3065 			unregister_netdev(ifp->net);
3066 			free_netdev(ifp->net);
3067 		}
3068 	} else
3069 		if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) {
3070 			DHD_ERROR(("%s: OOM - dhd_if_t(%d)\n", __FUNCTION__, sizeof(dhd_if_t)));
3071 			return -ENOMEM;
3072 		}
3073 
3074 	memset(ifp, 0, sizeof(dhd_if_t));
3075 	ifp->event2cfg80211 = FALSE;
3076 	ifp->info = dhd;
3077 	dhd->iflist[ifidx] = ifp;
3078 	strncpy(ifp->name, name, IFNAMSIZ);
3079 	ifp->name[IFNAMSIZ] = '\0';
3080 	INIT_LIST_HEAD(&ifp->ipv6_list);
3081 	spin_lock_init(&ifp->ipv6_lock);
3082 	if (mac_addr != NULL)
3083 		memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
3084 
3085 	if (handle == NULL) {
3086 		ifp->state = DHD_IF_ADD;
3087 		ifp->idx = ifidx;
3088 		ifp->bssidx = bssidx;
3089 		ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
3090 		up(&dhd->thr_sysioc_ctl.sema);
3091 	} else
3092 		ifp->net = (struct net_device *)handle;
3093 
3094 	if (ifidx == 0) {
3095 		ifp->event2cfg80211 = TRUE;
3096 	}
3097 
3098 	return 0;
3099 }
3100 
3101 void
3102 dhd_del_if(dhd_info_t *dhd, int ifidx)
3103 {
3104 	dhd_if_t *ifp;
3105 
3106 	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
3107 
3108 	ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
3109 	ifp = dhd->iflist[ifidx];
3110 	if (!ifp) {
3111 		DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
3112 		return;
3113 	}
3114 
3115 	ifp->state = DHD_IF_DEL;
3116 	ifp->idx = ifidx;
3117 	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
3118 	up(&dhd->thr_sysioc_ctl.sema);
3119 }
3120 
3121 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Netdev callbacks for the primary interface (supports open/stop). */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
3135 
/* Netdev callbacks for virtual interfaces (no open/stop: lifecycle is
 * managed by the driver via dhd_add_if()/dhd_del_if()).
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
3147 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
3148 
/*
 * Allocate and initialize the per-adapter dhd_info_t and its primary
 * network interface, attach the protocol layer and (configuration
 * permitting) cfg80211/wireless-extensions, create the driver threads,
 * and register PM / address notifiers.
 *
 * Parameters:
 *   osh        - OS abstraction handle used for all allocations
 *   bus        - bus module pointer stored in dhd->pub.bus
 *   bus_hdrlen - bus header length stored in dhd->pub.hdrlen
 *
 * Returns a pointer to the embedded dhd_pub_t on success, or NULL on
 * failure.  On failure, any state already reached (tracked in
 * dhd_state) is torn down via dhd_detach()/dhd_free().
 */
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
{
	dhd_info_t *dhd = NULL;
	struct net_device *net = NULL;

	/* Bitmask of completed attach stages; consulted on the fail path. */
	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* updates firmware nvram path if it was provided as module parameters */
	if (strlen(firmware_path) != 0) {
		bzero(fw_path, MOD_PARAM_PATHLEN);
		strncpy(fw_path, firmware_path, sizeof(fw_path) - 1);
	}
	if (strlen(nvram_path) != 0) {
		bzero(nv_path, MOD_PARAM_PATHLEN);
		strncpy(nv_path, nvram_path, sizeof(nv_path) -1);
	}

	/* Allocate etherdev, including space for private structure.
	 * Note: the netdev priv area only holds a POINTER to dhd_info_t
	 * (sizeof(dhd), not sizeof(*dhd)); the structure itself is
	 * allocated separately below.
	 */
	if (!(net = alloc_etherdev(sizeof(dhd)))) {
		DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;

	/* Allocate primary dhd_info */
#if defined(CONFIG_DHD_USE_STATIC_BUF)
	/* Preallocated buffer preferred; the MALLOC below becomes the
	 * fallback when preallocation fails.
	 */
	dhd = (void *)dhd_os_prealloc(osh, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
	if (!dhd) {
		DHD_INFO(("%s: OOM - Pre-alloc dhd_info\n", __FUNCTION__));
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
		DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
		goto fail;
	}
#if defined(CONFIG_DHD_USE_STATIC_BUF)
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	memset(dhd, 0, sizeof(dhd_info_t));

	/* Mark thread ids invalid until the threads are actually created. */
#ifdef DHDTHREAD
	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
#endif /* DHDTHREAD */
	dhd->dhd_tasklet_create = FALSE;
	dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID;
	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;

	/*
	 * Save the dhd_info into the priv
	 */
	memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd));
	dhd->pub.osh = osh;

	/* Link to info module */
	dhd->pub.info = dhd;
	/* Link to bus module */
	dhd->pub.bus = bus;
	dhd->pub.hdrlen = bus_hdrlen;

	/* Set network interface name if it was provided as module parameter */
	if (iface_name[0]) {
		int len;
		char ch;
		strncpy(net->name, iface_name, IFNAMSIZ);
		net->name[IFNAMSIZ - 1] = 0;
		len = strlen(net->name);
		ch = net->name[len - 1];
		/* If the given name doesn't already end in a digit, append
		 * "%d" so the kernel assigns the unit number (space for the
		 * two extra chars plus NUL is checked first).
		 */
		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
			strcat(net->name, "%d");
	}

	/* Register interface 0 (the primary interface) with this netdev. */
	if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
		goto fail;
	dhd_state |= DHD_ATTACH_STATE_ADD_IF;

	/* Callbacks are installed later; clear them for now. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif

	sema_init(&dhd->proto_sem, 1);

#ifdef PROP_TXSTATUS
	spin_lock_init(&dhd->wlfc_spinlock);
#ifdef PROP_TXSTATUS_VSDB
	/* VSDB builds enable wlfc later, per-firmware. */
	dhd->pub.wlfc_enabled = FALSE;
#else
	if (!disable_proptx)
		dhd->pub.wlfc_enabled = TRUE;
	else
		dhd->pub.wlfc_enabled = FALSE;
#endif /* PROP_TXSTATUS_VSDB */
	dhd->pub.ptx_opt_enabled = FALSE;
	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
	dhd->pub.plat_enable = dhd_wlfc_plat_enable;
	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
#endif /* PROP_TXSTATUS */

	/* Initialize other structure content */
	init_waitqueue_head(&dhd->ioctl_resp_wait);
	init_waitqueue_head(&dhd->ctrl_wait);

	/* Initialize the spinlocks */
	spin_lock_init(&dhd->sdlock);
	spin_lock_init(&dhd->txqlock);
	spin_lock_init(&dhd->dhd_lock);
#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
	spin_lock_init(&dhd->rxf_lock);
#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
#ifdef DHDTCPACK_SUPPRESS
	spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */

	/* Initialize Wakelock stuff */
	spin_lock_init(&dhd->wakelock_spinlock);
	dhd->wakelock_counter = 0;
	dhd->wakelock_wd_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#endif /* CONFIG_HAS_WAKELOCK */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	mutex_init(&dhd->dhd_net_if_mutex);
	mutex_init(&dhd->dhd_suspend_mutex);
#endif
	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;

	/* Attach and link in the protocol */
	if (dhd_prot_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_prot_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;

#ifdef WL_CFG80211
	/* Attach and link in the cfg80211 */
	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
		DHD_ERROR(("wl_cfg80211_attach failed\n"));
		goto fail;
	}

	dhd_monitor_init(&dhd->pub);
	dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif
#if defined(WL_WIRELESS_EXT)
	/* Attach and link in the iw (only when cfg80211 did not attach) */
	if (!(dhd_state &  DHD_ATTACH_STATE_CFG80211)) {
		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
		DHD_ERROR(("wl_iw_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
	}
#endif /* defined(WL_WIRELESS_EXT) */


	/* Set up the watchdog timer (armed later via dhd_os_wd_timer) */
	init_timer(&dhd->timer);
	dhd->timer.data = (ulong)dhd;
	dhd->timer.function = dhd_watchdog;
	dhd->default_wd_interval = dhd_watchdog_ms;

#ifdef DHDTHREAD
	/* Initialize thread based operation and lock */
	sema_init(&dhd->sdsem, 1);
	/* Serialize bus access with sdlock only when both watchdog and DPC
	 * run as threads.
	 */
	if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
		dhd->threads_only = TRUE;
	}
	else {
		dhd->threads_only = FALSE;
	}

	if (dhd_watchdog_prio >= 0) {
		/* Initialize watchdog thread */
		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");

	} else {
		dhd->thr_wdt_ctl.thr_pid = -1;
	}

	/* Set up the bottom half handler */
	if (dhd_dpc_prio >= 0) {
		/* Initialize DPC thread */
		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
	} else {
		/*  use tasklet for dpc */
		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
		dhd->thr_dpc_ctl.thr_pid = -1;
	}
#ifdef RXFRAME_THREAD
	bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
	/* Initialize RXF thread */
	PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
#endif
#else
	/* Set up the bottom half handler */
	tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
	dhd->dhd_tasklet_create = TRUE;
#endif /* DHDTHREAD */

	if (dhd_sysioc) {
		PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0, "dhd_sysioc");
	} else {
		dhd->thr_sysioc_ctl.thr_pid = -1;
	}
	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	INIT_WORK(&dhd->work_hang, dhd_hang_process);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))  */

	/*
	 * Save the dhd_info into the priv
	 */
	memcpy(netdev_priv(net), &dhd, sizeof(dhd));

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
	register_pm_notifier(&dhd_sleep_pm_notifier);
#endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	dhd->pend_ipaddr = 0;
	register_inetaddr_notifier(&dhd_notifier);
#endif /* ARP_OFFLOAD_SUPPORT */
	register_inet6addr_notifier(&dhd_notifier_ipv6);

#ifdef DHDTCPACK_SUPPRESS
	dhd->pub.tcp_ack_info_cnt = 0;
	bzero(dhd->pub.tcp_ack_info_tbl, sizeof(struct tcp_ack_info)*MAXTCPSTREAMS);
#endif /* DHDTCPACK_SUPPRESS */

	dhd_state |= DHD_ATTACH_STATE_DONE;
	dhd->dhd_state = dhd_state;
	return &dhd->pub;

fail:
	/* Before DHD_ATTACH_STATE_DHD_ALLOC only the netdev exists;
	 * afterwards dhd_detach() unwinds based on dhd_state.
	 */
	if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) {
		if (net) free_netdev(net);
	} else {
		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
			__FUNCTION__, dhd_state, &dhd->pub));
		dhd->dhd_state = dhd_state;
		dhd_detach(&dhd->pub);
		dhd_free(&dhd->pub);
	}

	return NULL;
}
3413 
/*
 * Bring the dongle bus up: download firmware/nvram if the bus is down,
 * start the watchdog timer, initialize the bus and protocol layers, and
 * (when OOB_INTR_ONLY) register the out-of-band interrupt.
 *
 * Returns 0 on success, or a negative value on failure.  Every early
 * return releases the sdlock taken at entry (threads_only builds).
 */
int
dhd_bus_start(dhd_pub_t *dhdp)
{
	int ret = -1;
	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
	unsigned long flags;

	ASSERT(dhd);

	DHD_TRACE(("Enter %s:\n", __FUNCTION__));

#ifdef DHDTHREAD
	if (dhd->threads_only)
		dhd_os_sdlock(dhdp);
#endif /* DHDTHREAD */


	/* try to download image and nvram to the dongle */
	if  ((dhd->pub.busstate == DHD_BUS_DOWN) &&
		(fw_path[0] != '\0') && (nv_path[0] != '\0')) {
#ifdef SHOW_NVRAM_TYPE
		{	/* Show nvram type in the kernel log */
			int i;
			/* Skip to the character after the first '.' in nv_path */
			for (i = 0; nv_path[i] != '\0'; ++i) {
				if (nv_path[i] == '.') {
					++i;
					break;
				}
			}
			DHD_ERROR(("%s: nvram_type = [%s]\n", __FUNCTION__, &nv_path[i]));
		}
#endif /* SHOW_NVRAM_TYPE */
		/* wake lock moved to dhdsdio_download_firmware */
		if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
			fw_path, nv_path))) {
			DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
				__FUNCTION__, fw_path, nv_path));
#ifdef DHDTHREAD
			if (dhd->threads_only)
				dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */
			return -1;
		}
	}
	/* Firmware must be loaded at this point for the bus to come up. */
	if (dhd->pub.busstate != DHD_BUS_LOAD) {
#ifdef DHDTHREAD
		if (dhd->threads_only)
			dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */
		return -ENETDOWN;
	}

	/* Start the watchdog timer */
	dhd->pub.tickcnt = 0;
	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);

	/* Bring up the bus */
	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {

		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
#ifdef DHDTHREAD
		if (dhd->threads_only)
			dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */
		return ret;
	}
#if defined(OOB_INTR_ONLY)
	/* Host registration for OOB interrupt */
	if (bcmsdh_register_oob_intr(dhdp)) {
		/* deactivate timer and wait for the handler to finish */

		flags = dhd_os_spin_lock(&dhd->pub);
		dhd->wd_timer_valid = FALSE;
		dhd_os_spin_unlock(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);

		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
#ifdef DHDTHREAD
		if (dhd->threads_only)
			dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif

	/* If bus is not ready, can't come up */
	if (dhd->pub.busstate != DHD_BUS_DATA) {
		/* Stop the watchdog started above before bailing out. */
		flags = dhd_os_spin_lock(&dhd->pub);
		dhd->wd_timer_valid = FALSE;
		dhd_os_spin_unlock(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
#ifdef DHDTHREAD
		if (dhd->threads_only)
			dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

#ifdef DHDTHREAD
	if (dhd->threads_only)
		dhd_os_sdunlock(dhdp);
#endif /* DHDTHREAD */

	dhd_process_cid_mac(dhdp, TRUE);

	/* Bus is ready, do any protocol initialization */
	if ((ret = dhd_prot_init(&dhd->pub)) < 0)
		return ret;

	dhd_process_cid_mac(dhdp, FALSE);

#ifdef ARP_OFFLOAD_SUPPORT
	/* Flush any IP address that arrived before the bus was up into the
	 * dongle's host IP table.
	 */
	if (dhd->pend_ipaddr) {
#ifdef AOE_IP_ALIAS_SUPPORT
		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
#endif /* AOE_IP_ALIAS_SUPPORT */
		dhd->pend_ipaddr = 0;
	}
#endif /* ARP_OFFLOAD_SUPPORT */

	return 0;
}
3542 
3543 #ifdef WLTDLS
/*
 * Enable/disable TDLS in the dongle and, when a peer MAC is given,
 * configure TDLS auto mode and its thresholds via firmware iovars.
 *
 * Parameters:
 *   dhd     - driver public state
 *   tdls_on - desired tdls_enable state (skipped if already set)
 *   auto_on - value written to tdls_auto_op when mac != NULL
 *   mac     - peer address; non-NULL triggers the auto-mode setup
 *
 * Returns 0 / BCME_OK on success, BCME_ERROR if the firmware lacks
 * TDLS support, or the first failing iovar's error code.
 */
int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 tdls = tdls_on;
	int ret = 0;
	uint32 tdls_auto_op = 0;
	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
	if (!FW_SUPPORTED(dhd, tdls))
		return BCME_ERROR;

	/* Already in the requested state: only (re)apply auto-mode settings. */
	if (dhd->tdls_enable == tdls_on)
		goto auto_mode;
	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
		goto exit;
	}
	dhd->tdls_enable = tdls_on;
auto_mode:
	if (mac) {
		tdls_auto_op = auto_on;
		bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
			iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
			goto exit;
		}

		/* Auto mode on: program idle timeout and RSSI thresholds. */
		if (tdls_auto_op) {
			bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
				sizeof(tdls_idle_time),	iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
				goto exit;
			}
			bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
				goto exit;
			}
			bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
				goto exit;
			}
		}
	}
exit:
	return ret;
}
3598 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
3599 {
3600 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
3601 	int ret = 0;
3602 	if (dhd)
3603 		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
3604 	else
3605 		ret = BCME_ERROR;
3606 	return ret;
3607 }
3608 #endif /* WLTDLS */
3609 
3610 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
3611 {
3612 	if (!dhd)
3613 		return FALSE;
3614 
3615 	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
3616 		return TRUE;
3617 	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
3618 		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
3619 		return TRUE;
3620 	else
3621 		return FALSE;
3622 }
3623 
3624 #if !defined(AP) && defined(WLP2P)
3625 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
3626  * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
3627  * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
3628  * would still be named as fw_bcmdhd_apsta.
3629  */
3630 uint32
3631 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
3632 {
3633 	int32 ret = 0;
3634 	char buf[WLC_IOCTL_SMLEN];
3635 	bool mchan_supported = FALSE;
3636 	/* if dhd->op_mode is already set for HOSTAP,
3637 	  * that means we only will use the mode as it is
3638 	  */
3639 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
3640 		return 0;
3641 	if (FW_SUPPORTED(dhd, vsdb)) {
3642 		mchan_supported = TRUE;
3643 	}
3644 	if (!FW_SUPPORTED(dhd, p2p)) {
3645 		DHD_TRACE(("Chip does not support p2p\n"));
3646 		return 0;
3647 	}
3648 	else {
3649 		/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
3650 		memset(buf, 0, sizeof(buf));
3651 		bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
3652 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
3653 			FALSE, 0)) < 0) {
3654 			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
3655 			return 0;
3656 		}
3657 		else {
3658 			if (buf[0] == 1) {
3659 				/* By default, chip supports single chan concurrency,
3660 				* now lets check for mchan
3661 				*/
3662 				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
3663 				if (mchan_supported)
3664 					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
3665 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
3666 				/* For customer_hw4, although ICS,
3667 				* we still support concurrent mode
3668 				*/
3669 				return ret;
3670 #else
3671 				return 0;
3672 #endif
3673 			}
3674 		}
3675 	}
3676 	return 0;
3677 }
3678 #endif
3679 int
3680 dhd_preinit_ioctls(dhd_pub_t *dhd)
3681 {
3682 	int ret = 0;
3683 	char eventmask[WL_EVENTING_MASK_LEN];
3684 	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
3685 	uint32 buf_key_b4_m4 = 1;
3686 #ifdef CUSTOM_AMPDU_BA_WSIZE
3687 	uint32 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
3688 #endif /* CUSTOM_AMPDU_BA_WSIZE */
3689 	uint power_mode = PM_FAST;
3690 	uint32 dongle_align = DHD_SDALIGN;
3691 	uint32 glom = CUSTOM_GLOM_SETTING;
3692 	uint bcn_timeout = 4;
3693 	uint retry_max = 3;
3694 #if defined(ARP_OFFLOAD_SUPPORT)
3695 	int arpoe = 1;
3696 #endif
3697 	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
3698 	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
3699 	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
3700 	char buf[WLC_IOCTL_SMLEN];
3701 	char *ptr;
3702 	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
3703 #ifdef ROAM_ENABLE
3704 	uint roamvar = 0;
3705 	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
3706 	int roam_scan_period[2] = {10, WLC_BAND_ALL};
3707 	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
3708 #ifdef ROAM_AP_ENV_DETECTION
3709 	int roam_env_mode = AP_ENV_INDETERMINATE;
3710 #endif /* ROAM_AP_ENV_DETECTION */
3711 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
3712 	int roam_fullscan_period = 60;
3713 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
3714 	int roam_fullscan_period = 120;
3715 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
3716 #else
3717 #ifdef DISABLE_BUILTIN_ROAM
3718 	uint roamvar = 1;
3719 #endif /* DISABLE_BUILTIN_ROAM */
3720 #endif /* ROAM_ENABLE */
3721 
3722 #if defined(SOFTAP)
3723 	uint dtim = 1;
3724 #endif
3725 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
3726 	uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
3727 	struct ether_addr p2p_ea;
3728 #endif
3729 
3730 #if defined(AP) || defined(WLP2P)
3731 	uint32 apsta = 1; /* Enable APSTA mode */
3732 #endif /* defined(AP) || defined(WLP2P) */
3733 #ifdef GET_CUSTOM_MAC_ENABLE
3734 	struct ether_addr ea_addr;
3735 #endif /* GET_CUSTOM_MAC_ENABLE */
3736 
3737 #ifdef DISABLE_11N
3738 	uint32 nmode = 0;
3739 #endif /* DISABLE_11N */
3740 #ifdef USE_WL_TXBF
3741 	uint32 txbf = 1;
3742 #endif /* USE_WL_TXBF */
3743 #ifdef USE_WL_FRAMEBURST
3744 	uint32 frameburst = 1;
3745 #endif /* USE_WL_FRAMEBURST */
3746 #ifdef SUPPORT_2G_VHT
3747 	uint32 vht_features = 0x3; /* 2G enable | rates all */
3748 #endif /* SUPPORT_2G_VHT */
3749 #ifdef PROP_TXSTATUS
3750 #ifdef PROP_TXSTATUS_VSDB
3751 	/* In case the host does not support proptxstatus, hostreorder in dongle should be off */
3752 	uint32 hostreorder = 0;
3753 	dhd->wlfc_enabled = FALSE;
3754 	/* enable WLFC only if the firmware is VSDB */
3755 #else
3756 	if (!disable_proptx)
3757 		dhd->wlfc_enabled = TRUE;
3758 	else
3759 		dhd->wlfc_enabled = FALSE;
3760 #endif /* PROP_TXSTATUS_VSDB */
3761 #endif /* PROP_TXSTATUS */
3762 #ifdef WLTDLS
3763 	dhd->tdls_enable = FALSE;
3764 #endif /* WLTDLS */
3765 	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
3766 	DHD_TRACE(("Enter %s\n", __FUNCTION__));
3767 	dhd->op_mode = 0;
3768 #ifdef GET_CUSTOM_MAC_ENABLE
3769 	ret = dhd_custom_get_mac_address(ea_addr.octet);
3770 	if (!ret) {
3771 		memset(buf, 0, sizeof(buf));
3772 		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
3773 		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
3774 		if (ret < 0) {
3775 			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
3776 			return BCME_NOTUP;
3777 		}
3778 		memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
3779 	} else {
3780 #endif /* GET_CUSTOM_MAC_ENABLE */
3781 		/* Get the default device MAC address directly from firmware */
3782 		memset(buf, 0, sizeof(buf));
3783 		bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
3784 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
3785 			FALSE, 0)) < 0) {
3786 			DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
3787 			return BCME_NOTUP;
3788 		}
3789 		/* Update public MAC address after reading from Firmware */
3790 		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
3791 
3792 #ifdef GET_CUSTOM_MAC_ENABLE
3793 	}
3794 #endif /* GET_CUSTOM_MAC_ENABLE */
3795 
3796 	DHD_TRACE(("Firmware = %s\n", fw_path));
3797 	/* get a capabilities from firmware */
3798 	memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
3799 	bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
3800 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
3801 		sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
3802 		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
3803 			__FUNCTION__, ret));
3804 		return 0;
3805 	}
3806 	DHD_TRACE(("Firmare Capabilities: %s\n", dhd->fw_capabilities));
3807 	if ((!op_mode && strstr(fw_path, "_apsta") != NULL) ||
3808 		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
3809 #ifdef SET_RANDOM_MAC_SOFTAP
3810 		uint rand_mac;
3811 #endif
3812 		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
3813 #if defined(ARP_OFFLOAD_SUPPORT)
3814 			arpoe = 0;
3815 #endif
3816 #ifdef PKT_FILTER_SUPPORT
3817 			dhd_pkt_filter_enable = FALSE;
3818 #endif
3819 #ifdef SET_RANDOM_MAC_SOFTAP
3820 		SRANDOM32((uint)jiffies);
3821 		rand_mac = RANDOM32();
3822 		iovbuf[0] = 0x02;			   /* locally administered bit */
3823 		iovbuf[1] = 0x1A;
3824 		iovbuf[2] = 0x11;
3825 		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
3826 		iovbuf[4] = (unsigned char)(rand_mac >> 8);
3827 		iovbuf[5] = (unsigned char)(rand_mac >> 16);
3828 
3829 		bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
3830 		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
3831 		if (ret < 0) {
3832 			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
3833 		} else
3834 			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
3835 #endif /* SET_RANDOM_MAC_SOFTAP */
3836 #if !defined(AP) && defined(WL_CFG80211)
3837 		/* Turn off MPC in AP mode */
3838 		bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
3839 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
3840 			sizeof(iovbuf), TRUE, 0)) < 0) {
3841 			DHD_ERROR(("%s mpc for HostAPD failed  %d\n", __FUNCTION__, ret));
3842 		}
3843 #endif
3844 	}
3845 	else {
3846 		uint32 concurrent_mode = 0;
3847 		if ((!op_mode && strstr(fw_path, "_p2p") != NULL) ||
3848 			(op_mode == DHD_FLAG_P2P_MODE)) {
3849 #if defined(ARP_OFFLOAD_SUPPORT)
3850 			arpoe = 0;
3851 #endif
3852 #ifdef PKT_FILTER_SUPPORT
3853 			dhd_pkt_filter_enable = FALSE;
3854 #endif
3855 			dhd->op_mode = DHD_FLAG_P2P_MODE;
3856 		} else if (op_mode == DHD_FLAG_IBSS_MODE ||
3857 			(!op_mode && strstr(fw_path, "_ibss") != NULL)) {
3858 			dhd->op_mode = DHD_FLAG_IBSS_MODE;
3859 		} else {
3860 			dhd->op_mode = DHD_FLAG_STA_MODE;
3861 		}
3862 #if !defined(AP) && defined(WLP2P)
3863 		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
3864 			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
3865 #if defined(ARP_OFFLOAD_SUPPORT)
3866 			arpoe = 1;
3867 #endif
3868 			dhd->op_mode |= concurrent_mode;
3869 		}
3870 
3871 		/* Check if we are enabling p2p */
3872 		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
3873 			bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
3874 			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
3875 				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
3876 				DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
3877 			}
3878 
3879 			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
3880 			ETHER_SET_LOCALADDR(&p2p_ea);
3881 			bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
3882 				ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
3883 			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
3884 				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
3885 				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
3886 			} else {
3887 				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
3888 			}
3889 		}
3890 #else
3891 	(void)concurrent_mode;
3892 #endif
3893 	}
3894 
3895 	DHD_ERROR(("Firmware up: op_mode=0x%04x, "
3896 		"Broadcom Dongle Host Driver mac="MACDBG"\n",
3897 		dhd->op_mode,
3898 		MAC2STRDBG(dhd->mac.octet)));
3899 	/* Set Country code  */
3900 	if (dhd->dhd_cspec.ccode[0] != 0) {
3901 		bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
3902 			sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
3903 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3904 			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
3905 	}
3906 
3907 	/* Set Listen Interval */
3908 	bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
3909 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3910 		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
3911 
3912 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
3913 	/* Disable built-in roaming to allowed ext supplicant to take care of roaming */
3914 	bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
3915 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3916 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
3917 #if defined(ROAM_ENABLE)
3918 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
3919 		sizeof(roam_trigger), TRUE, 0)) < 0)
3920 		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
3921 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
3922 		sizeof(roam_scan_period), TRUE, 0)) < 0)
3923 		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
3924 	if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
3925 		sizeof(roam_delta), TRUE, 0)) < 0)
3926 		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
3927 	bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
3928 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
3929 		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
3930 #ifdef ROAM_AP_ENV_DETECTION
3931 	if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
3932 		bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode,
3933 			4, iovbuf, sizeof(iovbuf));
3934 		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) == BCME_OK)
3935 			dhd->roam_env_detection = TRUE;
3936 		else {
3937 			dhd->roam_env_detection = FALSE;
3938 		}
3939 	}
3940 #endif /* ROAM_AP_ENV_DETECTION */
3941 #endif /* ROAM_ENABLE */
3942 
3943 #ifdef WLTDLS
3944 	/* by default TDLS on and auto mode off */
3945 	_dhd_tdls_enable(dhd, true, false, NULL);
3946 #endif /* WLTDLS */
3947 
3948 	/* Set PowerSave mode */
3949 	dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
3950 
3951 	/* Match Host and Dongle rx alignment */
3952 	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
3953 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3954 
3955 	if (glom != DEFAULT_GLOM_VALUE) {
3956 		DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
3957 		bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
3958 		dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3959 	}
3960 
3961 	/* Setup timeout if Beacons are lost and roam is off to report link down */
3962 	bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
3963 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3964 	/* Setup assoc_retry_max count to reconnect target AP in dongle */
3965 	bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
3966 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3967 #if defined(AP) && !defined(WLP2P)
3968 	/* Turn off MPC in AP mode */
3969 	bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
3970 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3971 	bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
3972 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
3973 #endif /* defined(AP) && !defined(WLP2P) */
3974 
3975 
3976 #if defined(SOFTAP)
3977 	if (ap_fw_loaded == TRUE) {
3978 		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
3979 	}
3980 #endif
3981 
3982 #if defined(KEEP_ALIVE)
3983 	{
3984 	/* Set Keep Alive : be sure to use FW with -keepalive */
3985 	int res;
3986 
3987 #if defined(SOFTAP)
3988 	if (ap_fw_loaded == FALSE)
3989 #endif
3990 		if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
3991 			if ((res = dhd_keep_alive_onoff(dhd)) < 0)
3992 				DHD_ERROR(("%s set keeplive failed %d\n",
3993 				__FUNCTION__, res));
3994 		}
3995 	}
3996 #endif /* defined(KEEP_ALIVE) */
3997 #ifdef USE_WL_TXBF
3998 	bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
3999 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4000 		sizeof(iovbuf), TRUE, 0)) < 0) {
4001 		DHD_ERROR(("%s Set txbf failed  %d\n", __FUNCTION__, ret));
4002 	}
4003 #endif /* USE_WL_TXBF */
4004 #ifdef USE_WL_FRAMEBURST
4005 	/* Set frameburst to value */
4006 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
4007 		sizeof(frameburst), TRUE, 0)) < 0) {
4008 		DHD_ERROR(("%s Set frameburst failed  %d\n", __FUNCTION__, ret));
4009 	}
4010 #endif /* USE_WL_FRAMEBURST */
4011 #ifdef CUSTOM_AMPDU_BA_WSIZE
4012 	/* Set ampdu ba wsize to 64 */
4013 	bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
4014 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4015 		sizeof(iovbuf), TRUE, 0)) < 0) {
4016 		DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
4017 			__FUNCTION__, CUSTOM_AMPDU_BA_WSIZE, ret));
4018 	}
4019 #endif /* CUSTOM_AMPDU_BA_WSIZE */
4020 #ifdef SUPPORT_2G_VHT
4021 	bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
4022 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4023 		DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
4024 	}
4025 #endif /* SUPPORT_2G_VHT */
4026 
4027 	bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
4028 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
4029 		sizeof(iovbuf), TRUE, 0)) < 0) {
4030 		DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
4031 	}
4032 
4033 	/* Read event_msgs mask */
4034 	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
4035 	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
4036 		DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
4037 		goto done;
4038 	}
4039 	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
4040 
4041 	/* Setup event_msgs */
4042 	setbit(eventmask, WLC_E_SET_SSID);
4043 	setbit(eventmask, WLC_E_PRUNE);
4044 	setbit(eventmask, WLC_E_AUTH);
4045 	setbit(eventmask, WLC_E_ASSOC);
4046 	setbit(eventmask, WLC_E_REASSOC);
4047 	setbit(eventmask, WLC_E_REASSOC_IND);
4048 	setbit(eventmask, WLC_E_DEAUTH);
4049 	setbit(eventmask, WLC_E_DEAUTH_IND);
4050 	setbit(eventmask, WLC_E_DISASSOC_IND);
4051 	setbit(eventmask, WLC_E_DISASSOC);
4052 	setbit(eventmask, WLC_E_JOIN);
4053 	setbit(eventmask, WLC_E_START);
4054 	setbit(eventmask, WLC_E_ASSOC_IND);
4055 	setbit(eventmask, WLC_E_PSK_SUP);
4056 	setbit(eventmask, WLC_E_LINK);
4057 	setbit(eventmask, WLC_E_NDIS_LINK);
4058 	setbit(eventmask, WLC_E_MIC_ERROR);
4059 	setbit(eventmask, WLC_E_ASSOC_REQ_IE);
4060 	setbit(eventmask, WLC_E_ASSOC_RESP_IE);
4061 #ifndef WL_CFG80211
4062 	setbit(eventmask, WLC_E_PMKID_CACHE);
4063 	setbit(eventmask, WLC_E_TXFAIL);
4064 #endif
4065 	setbit(eventmask, WLC_E_JOIN_START);
4066 	setbit(eventmask, WLC_E_SCAN_COMPLETE);
4067 #ifdef WLMEDIA_HTSF
4068 	setbit(eventmask, WLC_E_HTSFSYNC);
4069 #endif /* WLMEDIA_HTSF */
4070 #ifdef PNO_SUPPORT
4071 	setbit(eventmask, WLC_E_PFN_NET_FOUND);
4072 	setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
4073 	setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
4074 	setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
4075 #endif /* PNO_SUPPORT */
4076 	/* enable dongle roaming event */
4077 	setbit(eventmask, WLC_E_ROAM);
4078 #ifdef WLTDLS
4079 	setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
4080 #endif /* WLTDLS */
4081 #ifdef WL_CFG80211
4082 	setbit(eventmask, WLC_E_ESCAN_RESULT);
4083 	if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
4084 		setbit(eventmask, WLC_E_ACTION_FRAME_RX);
4085 		setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
4086 	}
4087 #endif /* WL_CFG80211 */
4088 
4089 	/* Write updated Event mask */
4090 	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
4091 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
4092 		DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
4093 		goto done;
4094 	}
4095 
4096 	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
4097 		sizeof(scan_assoc_time), TRUE, 0);
4098 	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
4099 		sizeof(scan_unassoc_time), TRUE, 0);
4100 	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
4101 		sizeof(scan_passive_time), TRUE, 0);
4102 
4103 #ifdef ARP_OFFLOAD_SUPPORT
4104 	/* Set and enable ARP offload feature for STA only  */
4105 #if defined(SOFTAP)
4106 	if (arpoe && !ap_fw_loaded) {
4107 #else
4108 	if (arpoe) {
4109 #endif
4110 		dhd_arp_offload_enable(dhd, TRUE);
4111 		dhd_arp_offload_set(dhd, dhd_arp_mode);
4112 	} else {
4113 		dhd_arp_offload_enable(dhd, FALSE);
4114 		dhd_arp_offload_set(dhd, 0);
4115 	}
4116 	dhd_arp_enable = arpoe;
4117 #endif /* ARP_OFFLOAD_SUPPORT */
4118 
4119 #ifdef PKT_FILTER_SUPPORT
4120 	/* Setup default defintions for pktfilter , enable in suspend */
4121 	dhd->pktfilter_count = 6;
4122 	/* Setup filter to allow only unicast */
4123 	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
4124 	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
4125 	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
4126 	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
4127 	/* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
4128 	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
4129 	/* apply APP pktfilter */
4130 	dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
4131 
4132 #if defined(SOFTAP)
4133 	if (ap_fw_loaded) {
4134 		dhd_enable_packet_filter(0, dhd);
4135 	}
4136 #endif /* defined(SOFTAP) */
4137 	dhd_set_packet_filter(dhd);
4138 #endif /* PKT_FILTER_SUPPORT */
4139 #ifdef DISABLE_11N
4140 	bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
4141 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4142 		DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
4143 #else
4144 #if defined(PROP_TXSTATUS) && defined(PROP_TXSTATUS_VSDB)
4145 	bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, buf, sizeof(buf));
4146 	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
4147 #endif
4148 #endif /* DISABLE_11N */
4149 
4150 
4151 
4152 	/* query for 'ver' to get version info from firmware */
4153 	memset(buf, 0, sizeof(buf));
4154 	ptr = buf;
4155 	bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
4156 	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
4157 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
4158 	else {
4159 		bcmstrtok(&ptr, "\n", 0);
4160 		/* Print fw version info */
4161 		DHD_ERROR(("Firmware version = %s\n", buf));
4162 		dhd_set_version_info(dhd, buf);
4163 
4164 		/* Check and adjust IOCTL response timeout for Manufactring firmware */
4165 		if (strstr(buf, MANUFACTRING_FW) != NULL) {
4166 			dhd_os_set_ioctl_resp_timeout(20000);
4167 			DHD_ERROR(("%s : adjust IOCTL response time for Manufactring Firmware\n",
4168 			__FUNCTION__));
4169 		}
4170 	}
4171 
4172 #ifdef BCMSDIOH_TXGLOM
4173 	if (bcmsdh_glom_enabled()) {
4174 		dhd_txglom_enable(dhd, TRUE);
4175 	}
4176 #endif /* BCMSDIOH_TXGLOM */
4177 
4178 #if defined(PROP_TXSTATUS) && !defined(PROP_TXSTATUS_VSDB)
4179 	dhd_wlfc_init(dhd);
4180 #endif /* PROP_TXSTATUS && !PROP_TXSTATUS_VSDB */
4181 #ifdef PNO_SUPPORT
4182 	if (!dhd->pno_state) {
4183 		dhd_pno_init(dhd);
4184 	}
4185 #endif
4186 
4187 done:
4188 	return ret;
4189 }
4190 
4191 
4192 int
4193 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
4194 {
4195 	char buf[strlen(name) + 1 + cmd_len];
4196 	int len = sizeof(buf);
4197 	wl_ioctl_t ioc;
4198 	int ret;
4199 
4200 	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
4201 
4202 	memset(&ioc, 0, sizeof(ioc));
4203 
4204 	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
4205 	ioc.buf = buf;
4206 	ioc.len = len;
4207 	ioc.set = set;
4208 
4209 	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
4210 	if (!set && ret >= 0)
4211 		memcpy(cmd_buf, buf, cmd_len);
4212 
4213 	return ret;
4214 }
4215 
4216 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
4217 {
4218 	struct dhd_info *dhd = dhdp->info;
4219 	struct net_device *dev = NULL;
4220 
4221 	ASSERT(dhd && dhd->iflist[ifidx]);
4222 	dev = dhd->iflist[ifidx]->net;
4223 	ASSERT(dev);
4224 
4225 	if (netif_running(dev)) {
4226 		DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
4227 		return BCME_NOTDOWN;
4228 	}
4229 
4230 #define DHD_MIN_MTU 1500
4231 #define DHD_MAX_MTU 1752
4232 
4233 	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
4234 		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
4235 		return BCME_BADARG;
4236 	}
4237 
4238 	dev->mtu = new_mtu;
4239 	return 0;
4240 }
4241 
4242 #ifdef ARP_OFFLOAD_SUPPORT
4243 /* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
4244 void
4245 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
4246 {
4247 	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
4248 	int i;
4249 	int ret;
4250 
4251 	bzero(ipv4_buf, sizeof(ipv4_buf));
4252 
4253 	/* display what we've got */
4254 	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
4255 	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
4256 #ifdef AOE_DBG
4257 	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
4258 #endif
4259 	/* now we saved hoste_ip table, clr it in the dongle AOE */
4260 	dhd_aoe_hostip_clr(dhd_pub, idx);
4261 
4262 	if (ret) {
4263 		DHD_ERROR(("%s failed\n", __FUNCTION__));
4264 		return;
4265 	}
4266 
4267 	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
4268 		if (add && (ipv4_buf[i] == 0)) {
4269 				ipv4_buf[i] = ipa;
4270 				add = FALSE; /* added ipa to local table  */
4271 				DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
4272 				__FUNCTION__, i));
4273 		} else if (ipv4_buf[i] == ipa) {
4274 			ipv4_buf[i]	= 0;
4275 			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
4276 				__FUNCTION__, ipa, i));
4277 		}
4278 
4279 		if (ipv4_buf[i] != 0) {
4280 			/* add back host_ip entries from our local cache */
4281 			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
4282 			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
4283 				__FUNCTION__, ipv4_buf[i], i));
4284 		}
4285 	}
4286 #ifdef AOE_DBG
4287 	/* see the resulting hostip table */
4288 	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
4289 	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
4290 	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
4291 #endif
4292 }
4293 
4294 /*
4295  * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
4296  * whenever there is an event related to an IP address.
4297  * ptr : kernel provided pointer to IP address that has changed
4298  */
static int dhd_device_event(struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

	dhd_info_t *dhd;
	dhd_pub_t *dhd_pub;
	int idx;

	/* ARP offload disabled globally: nothing to mirror into the dongle. */
	if (!dhd_arp_enable)
		return NOTIFY_DONE;
	if (!ifa || !(ifa->ifa_dev->dev))
		return NOTIFY_DONE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	/* Filter notifications meant for non Broadcom devices */
	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
#if defined(WL_ENABLE_P2P_IF)
		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
#endif /* WL_ENABLE_P2P_IF */
			return NOTIFY_DONE;
	}
#endif /* LINUX_VERSION_CODE */

	dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
	if (!dhd)
		return NOTIFY_DONE;

	dhd_pub = &dhd->pub;

	/* arp_version 1 firmware keeps a single table; otherwise resolve the
	 * per-interface index by matching the notifying net_device.
	 */
	if (dhd_pub->arp_version == 1) {
		idx = 0;
	}
	else {
		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
			break;
		}
		if (idx < DHD_MAX_IFS)
			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
		else {
			/* No match: fall back to the primary interface's table. */
			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
			idx = 0;
		}
	}

	switch (event) {
		case NETDEV_UP:
			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));

			/* Bus not up yet: remember the address so it can be
			 * pushed to the dongle later (only one pending slot;
			 * an earlier pending address is overwritten).
			 */
			if (dhd->pub.busstate != DHD_BUS_DATA) {
				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
				if (dhd->pend_ipaddr) {
					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
						__FUNCTION__, dhd->pend_ipaddr));
				}
				dhd->pend_ipaddr = ifa->ifa_address;
				break;
			}

#ifdef AOE_IP_ALIAS_SUPPORT
			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
				__FUNCTION__));
			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
			break;

		case NETDEV_DOWN:
			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
			dhd->pend_ipaddr = 0;
#ifdef AOE_IP_ALIAS_SUPPORT
			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
				__FUNCTION__));
			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
#else
			/* Without alias support, drop the whole host_ip and ARP
			 * caches for this interface.
			 */
			dhd_aoe_hostip_clr(&dhd->pub, idx);
			dhd_aoe_arp_clr(&dhd->pub, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
			break;

		default:
			DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
				__func__, ifa->ifa_label, event));
			break;
	}
	return NOTIFY_DONE;
}
4391 #endif /* ARP_OFFLOAD_SUPPORT */
4392 
4393 /*
4394  * Neighbor Discovery Offload: Called when an interface
4395  * is assigned with ipv6 address.
4396  * Handles only primary interface
4397  */
static int dhd_device_ipv6_event(struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	dhd_info_t *dhd;
	dhd_pub_t *dhd_pub;
	struct ipv6_addr *_ipv6_addr = NULL;
	struct inet6_ifaddr *inet6_ifa = ptr;
	int idx = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	/* Filter notifications meant for non Broadcom devices */
	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
		goto exit;
	}
#endif /* LINUX_VERSION_CODE */

	dhd = *(dhd_info_t **)netdev_priv(inet6_ifa->idev->dev);
	if (!dhd)
		goto exit;

	idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
	if (idx == DHD_BAD_IF) {
		DHD_ERROR(("Cannot find ifidx"));
		goto exit;
	}
	dhd_pub = &dhd->pub;
	/* Skip entirely if the firmware lacks ND offload support. */
	if (!FW_SUPPORTED(dhd_pub, ndoe))
		goto exit;
	/* Allocate a work item only for the two events we handle; it is
	 * queued below and consumed (and freed) by the sysioc thread.
	 */
	if (event == NETDEV_UP || event == NETDEV_DOWN) {
		_ipv6_addr = NATIVE_MALLOC(dhd_pub->osh, sizeof(struct ipv6_addr));
		if (_ipv6_addr == NULL) {
			DHD_ERROR(("Failed to allocate ipv6\n"));
			goto exit;
		}
		memcpy(&_ipv6_addr->ipv6_addr[0], &inet6_ifa->addr, IPV6_ADDR_LEN);
		DHD_TRACE(("IPV6 address : %pI6\n", &inet6_ifa->addr));
	}
	switch (event) {
		case NETDEV_UP:
			DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
			_ipv6_addr->ipv6_oper = DHD_IPV6_ADDR_ADD;
			break;
		case NETDEV_DOWN:
			DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
			_ipv6_addr->ipv6_oper = DHD_IPV6_ADDR_DELETE;
			break;
		default:
			/* No allocation happened for other events, so no leak here. */
			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
			goto exit;
	}
	/* Hand the item to the per-interface list and wake the sysioc thread. */
	spin_lock_bh(&dhd->iflist[idx]->ipv6_lock);
	list_add_tail(&_ipv6_addr->list, &dhd->iflist[idx]->ipv6_list);
	spin_unlock_bh(&dhd->iflist[idx]->ipv6_lock);
	up(&dhd->thr_sysioc_ctl.sema);
exit:
	return NOTIFY_DONE;
}
4456 
/*
 * Finish setting up the net_device for interface 'ifidx' (ops, MAC,
 * ethtool/wireless handlers) and register it with the kernel.
 * Returns 0 on success or the register_netdev() error code.
 */
int
dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct net_device *net = NULL;
	int err = 0;
	/* Fallback MAC (Broadcom OUI 00:90:4c) used if no address is known. */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	ASSERT(dhd && dhd->iflist[ifidx]);

	net = dhd->iflist[ifidx]->net;
	ASSERT(net);

	/* Install device ops; all interfaces start with the virtual ops,
	 * the primary interface is switched to dhd_ops_pri below.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
		 * nor with P2P.  Need to set the Donlge's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
			__func__, net->name));
			temp_addr[0] |= 0x02;
		}
	}

	/* Reserve headroom for the dongle's protocol header. */
	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	net->ifindex = 0;
	if ((err = register_netdev(net)) != 0) {
		DHD_ERROR(("couldn't register the net device, err %d\n", err));
		goto fail;
	}
	printf("Broadcom Dongle Host Driver: register interface [%s]"
		" MAC: "MACDBG"\n",
		net->name,
		MAC2STRDBG(net->dev_addr));

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

	/* Signal dhd_module_init() that the primary interface registered. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	if (ifidx == 0) {
		dhd_registration_check = TRUE;
		up(&dhd_registration_sem);
	}
#endif
	return 0;

fail:
	/* Undo the ops assignment so a retry can pass the ASSERT above. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
4564 
4565 void
4566 dhd_bus_detach(dhd_pub_t *dhdp)
4567 {
4568 	dhd_info_t *dhd;
4569 
4570 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4571 
4572 	if (dhdp) {
4573 		dhd = (dhd_info_t *)dhdp->info;
4574 		if (dhd) {
4575 
4576 			/*
4577 			 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
4578 			 *  calling stop again will cuase SD read/write errors.
4579 			 */
4580 			if (dhd->pub.busstate != DHD_BUS_DOWN) {
4581 				/* Stop the protocol module */
4582 				dhd_prot_stop(&dhd->pub);
4583 
4584 				/* Stop the bus module */
4585 				dhd_bus_stop(dhd->pub.bus, TRUE);
4586 			}
4587 
4588 #if defined(OOB_INTR_ONLY)
4589 			bcmsdh_unregister_oob_intr();
4590 #endif
4591 		}
4592 	}
4593 }
4594 
4595 
/*
 * Full driver teardown: unregister notifiers, stop threads and timers,
 * delete interfaces, and release cfg80211/PNO/wakelock state. The order
 * of operations here is significant; only undoes what dhd_state says
 * was actually set up.
 */
void dhd_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	unsigned long flags;
	int timer_valid = FALSE;

	if (!dhdp)
		return;

	dhd = (dhd_info_t *)dhdp->info;
	if (!dhd)
		return;

	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
	/* Stop receiving IPv4/IPv6 address notifications first. */
#ifdef ARP_OFFLOAD_SUPPORT
	unregister_inetaddr_notifier(&dhd_notifier);
#endif /* ARP_OFFLOAD_SUPPORT */
	unregister_inet6addr_notifier(&dhd_notifier_ipv6);

	dhd->pub.up = 0;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		/* Give sufficient time for threads to start running in case
		 * dhd_attach() has failed
		 */
		OSL_SLEEP(100);
	}

	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
		dhd_bus_detach(dhdp);

		if (dhdp->prot)
			dhd_prot_detach(dhdp);
	}
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
		if (dhd->early_suspend.suspend)
			unregister_early_suspend(&dhd->early_suspend);
	}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	cancel_work_sync(&dhd->work_hang);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))  */

#if defined(WL_WIRELESS_EXT)
	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
		/* Detatch and unlink in the iw */
		wl_iw_detach();
	}
#endif /* defined(WL_WIRELESS_EXT) */

	if (dhd->thr_sysioc_ctl.thr_pid >= 0) {
		PROC_STOP(&dhd->thr_sysioc_ctl);
	}

	/* delete all interfaces, start with virtual  */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;
		dhd_if_t *ifp;

		/* Cleanup virtual interfaces */
		for (i = 1; i < DHD_MAX_IFS; i++) {
			dhd_net_if_lock_local(dhd);
			if (dhd->iflist[i]) {
				dhd->iflist[i]->state = DHD_IF_DEL;
				dhd->iflist[i]->idx = i;
				dhd_op_if(dhd->iflist[i]);
			}

			dhd_net_if_unlock_local(dhd);
		}
		/*  delete primary interface 0 */
		ifp = dhd->iflist[0];
		ASSERT(ifp);
		ASSERT(ifp->net);
		if (ifp && ifp->net) {
			/* Only unregister if the primary ops were actually
			 * installed (i.e. dhd_net_attach() completed).
			 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
			if (ifp->net->open)
#else
			if (ifp->net->netdev_ops == &dhd_ops_pri)
#endif
			{
				unregister_netdev(ifp->net);
				free_netdev(ifp->net);
				ifp->net = NULL;
				MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
				dhd->iflist[0] = NULL;
			}
		}
	}

	/* Clear the watchdog timer */
	flags = dhd_os_spin_lock(&dhd->pub);
	timer_valid = dhd->wd_timer_valid;
	dhd->wd_timer_valid = FALSE;
	dhd_os_spin_unlock(&dhd->pub, flags);
	if (timer_valid)
		del_timer_sync(&dhd->timer);

	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
#ifdef DHDTHREAD
		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_wdt_ctl);
		}

		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_dpc_ctl);
		}
		/* NOTE(review): the 'else' below binds to the rxf 'if' when
		 * RXFRAME_THREAD is defined, but to the dpc 'if' otherwise —
		 * confirm this preprocessor-dependent binding is intended.
		 */
#ifdef RXFRAME_THREAD
		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rxf_ctl);
		}
#endif
		else
#endif /* DHDTHREAD */
		tasklet_kill(&dhd->tasklet);
	}
#ifdef WL_CFG80211
	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
		wl_cfg80211_detach(NULL);
		dhd_monitor_uninit();
	}
#endif

#ifdef PNO_SUPPORT
	if (dhdp->pno_state)
		dhd_pno_deinit(dhdp);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
		unregister_pm_notifier(&dhd_sleep_pm_notifier);
#endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP */

	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
		dhd->wakelock_counter = 0;
		dhd->wakelock_wd_counter = 0;
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		wake_lock_destroy(&dhd->wl_wifi);
		wake_lock_destroy(&dhd->wl_rxwake);
		wake_lock_destroy(&dhd->wl_ctrlwake);
		wake_lock_destroy(&dhd->wl_wdwake);
#endif /* CONFIG_HAS_WAKELOCK */
	}
}
4743 
4744 
/*
 * Release per-instance memory: all rx reorder buffers, then the
 * dhd_info structure itself (unless it came from the static prealloc
 * pool, in which case it must not be MFREE'd).
 */
void
dhd_free(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				/* The buffer was allocated with a trailing
				 * array of (max_idx + 1) packet pointers.
				 */
				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}
		dhd = (dhd_info_t *)dhdp->info;
#if defined(CONFIG_DHD_USE_STATIC_BUF)
		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
		if (dhd != (dhd_info_t *)dhd_os_prealloc(NULL, DHD_PREALLOC_DHD_INFO, 0)) {
#endif /* CONFIG_DHD_USE_STATIC_BUF */
			if (dhd)
				MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
#if defined(CONFIG_DHD_USE_STATIC_BUF)
		}
		else {
			/* Prealloc'd: just drop the local reference. */
			if (dhd)
				dhd = NULL;
		}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	}
}
4784 
/*
 * Module exit: unregister the bus driver, tear down the Android wifi
 * control hooks, and power the chip off via the customer GPIO path.
 */
static void __exit
dhd_module_cleanup(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhd_bus_unregister();

#if defined(CONFIG_WIFI_CONTROL_FUNC)
	wl_android_wifictrl_func_del();
#endif /* CONFIG_WIFI_CONTROL_FUNC */
	wl_android_exit();

	/* Call customer gpio to turn off power with WL_REG_ON signal */
	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
}
4800 
4801 
4802 #if defined(CONFIG_WIFI_CONTROL_FUNC)
4803 extern bool g_wifi_poweron;
4804 #endif /* CONFIG_WIFI_CONTROL_FUNC */
4805 
/*
 * Module init: validate thread-priority module parameters, power up the
 * chip (with retries on SDIO platforms), register the bus driver, and
 * wait for the MMC probe callback to complete driver attach.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the fail_* labels.
 */
static int __init
dhd_module_init(void)
{
	int error = 0;

#if defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int retry = POWERUP_MAX_RETRY;
	int chip_up = 0;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	wl_android_init();

#if defined(DHDTHREAD)
	/* Sanity check on the module parameters */
	do {
		/* Both watchdog and DPC as tasklets are ok */
		if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
			break;

		/* If both watchdog and DPC are threads, TX must be deferred */
		if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
			break;

		DHD_ERROR(("Invalid module parameters.\n"));
		error = -EINVAL;
	} while (0);
#endif
	if (error)
		goto fail_0;

#if defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	/* Power-up loop: arm a semaphore that the SDIO probe notification
	 * releases, power the chip on, and wait for it to enumerate; on
	 * timeout, power off and retry up to POWERUP_MAX_RETRY times.
	 */
	do {
		sema_init(&dhd_chipup_sem, 0);
		dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
		dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
#if defined(CONFIG_WIFI_CONTROL_FUNC)
		if (wl_android_wifictrl_func_add() < 0) {
			dhd_bus_unreg_sdio_notify();
			goto fail_1;
		}
#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
		if (down_timeout(&dhd_chipup_sem,
			msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
			dhd_bus_unreg_sdio_notify();
			chip_up = 1;
			break;
		}
		/* NOTE(review): 'retry+1' is printed before the post-decrement
		 * in the loop condition, so the "left" count looks one high —
		 * confirm intended off-by-one in this log message.
		 */
		DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
			retry+1));
		dhd_bus_unreg_sdio_notify();
#if defined(CONFIG_WIFI_CONTROL_FUNC)
		wl_android_wifictrl_func_del();
#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
		dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
	} while (retry-- > 0);

	if (!chip_up) {
		DHD_ERROR(("\nfailed to power up wifi chip, max retry reached, exits **\n\n"));
		error = -ENODEV;
		goto fail_0;
	}
#else
	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
#if defined(CONFIG_WIFI_CONTROL_FUNC)
	if (wl_android_wifictrl_func_add() < 0)
		goto fail_1;
#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */

#endif

#if defined(CONFIG_WIFI_CONTROL_FUNC) && defined(BCMLXSDMMC)
	/* If the wifi_set_power() is failed,
	 * we need to jump error handling routines.
	 */
	if (!g_wifi_poweron) {
		printk("%s: wifi_set_power() failed\n", __FUNCTION__);
		error = -ENODEV;
		goto fail_1;
	}
#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	sema_init(&dhd_registration_sem, 0);
#endif


	error = dhd_bus_register();

	if (!error)
		printf("\n%s\n", dhd_version);
	else {
		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
		goto fail_1;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
	/*
	 * Wait till MMC sdio_register_driver callback called and made driver attach.
	 * It's needed to make sync up exit from dhd insmod  and
	 * Kernel MMC sdio device callback registration
	 */
	if ((down_timeout(&dhd_registration_sem,
		msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) ||
		(dhd_registration_check != TRUE)) {
		error = -ENODEV;
		DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
		goto fail_2;
	}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#if defined(WL_CFG80211)
	wl_android_post_init();
#endif /* defined(WL_CFG80211) */

	return error;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
fail_2:
	dhd_bus_unregister();
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

fail_1:

#if defined(CONFIG_WIFI_CONTROL_FUNC)
	wl_android_wifictrl_func_del();
#endif

	/* Call customer gpio to turn off power with WL_REG_ON signal */
	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);

fail_0:
	wl_android_exit();

	return error;
}
4942 
/* Register module entry/exit points. A late initcall is used on 2.6+
 * kernels (presumably so prerequisite subsystems initialize first —
 * TODO confirm); older kernels fall back to plain module_init().
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#ifdef USE_LATE_INITCALL_SYNC
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_cleanup);
4954 
4955 /*
4956  * OS specific functions required to implement DHD driver in OS independent way
4957  */
4958 int
4959 dhd_os_proto_block(dhd_pub_t *pub)
4960 {
4961 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
4962 
4963 	if (dhd) {
4964 		down(&dhd->proto_sem);
4965 		return 1;
4966 	}
4967 
4968 	return 0;
4969 }
4970 
4971 int
4972 dhd_os_proto_unblock(dhd_pub_t *pub)
4973 {
4974 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
4975 
4976 	if (dhd) {
4977 		up(&dhd->proto_sem);
4978 		return 1;
4979 	}
4980 
4981 	return 0;
4982 }
4983 
4984 unsigned int
4985 dhd_os_get_ioctl_resp_timeout(void)
4986 {
4987 	return ((unsigned int)dhd_ioctl_timeout_msec);
4988 }
4989 
4990 void
4991 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
4992 {
4993 	dhd_ioctl_timeout_msec = (int)timeout_msec;
4994 }
4995 
/*
 * Sleep until *condition becomes true or the configured IOCTL timeout
 * expires. Returns the wait_event_timeout() result: 0 on timeout,
 * otherwise the remaining jiffies. 'pending' is currently unused here.
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
	return timeout;
}
5012 
5013 int
5014 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
5015 {
5016 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
5017 
5018 	if (waitqueue_active(&dhd->ioctl_resp_wait)) {
5019 		wake_up(&dhd->ioctl_resp_wait);
5020 	}
5021 
5022 	return 0;
5023 }
5024 
5025 void
5026 dhd_os_wd_timer_extend(void *bus, bool extend)
5027 {
5028 	dhd_pub_t *pub = bus;
5029 	dhd_info_t *dhd = (dhd_info_t *)pub->info;
5030 
5031 	if (extend)
5032 		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
5033 	else
5034 		dhd_os_wd_timer(bus, dhd->default_wd_interval);
5035 }
5036 
5037 
/* Arm, re-arm or stop the driver watchdog timer.
 * wdtick == 0 stops the timer and releases the watchdog wake lock;
 * wdtick != 0 takes the wake lock and (re)arms the timer wdtick ms out.
 * While the bus is down nothing is armed (only the wake lock is dropped
 * on a stop request).
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	flags = dhd_os_spin_lock(pub);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		dhd_os_spin_unlock(pub, flags);
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		/* drop the spinlock first: del_timer_sync() may sleep */
		dhd_os_spin_unlock(pub, flags);
#ifdef DHDTHREAD
		del_timer_sync(&dhd->timer);
#else
		del_timer(&dhd->timer);
#endif /* DHDTHREAD */
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	dhd_os_spin_unlock(pub, flags);
}
5084 
5085 void *
5086 dhd_os_open_image(char *filename)
5087 {
5088 	struct file *fp;
5089 
5090 	fp = filp_open(filename, O_RDONLY, 0);
5091 	/*
5092 	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
5093 	 * Alternative:
5094 	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
5095 	 * ???
5096 	 */
5097 	 if (IS_ERR(fp))
5098 		 fp = NULL;
5099 
5100 	 return fp;
5101 }
5102 
5103 int
5104 dhd_os_get_image_block(char *buf, int len, void *image)
5105 {
5106 	struct file *fp = (struct file *)image;
5107 	int rdlen;
5108 
5109 	if (!image)
5110 		return 0;
5111 
5112 	rdlen = kernel_read(fp, fp->f_pos, buf, len);
5113 	if (rdlen > 0)
5114 		fp->f_pos += rdlen;
5115 
5116 	return rdlen;
5117 }
5118 
5119 void
5120 dhd_os_close_image(void *image)
5121 {
5122 	if (image)
5123 		filp_close((struct file *)image, NULL);
5124 }
5125 
5126 
/* Acquire the SDIO bus lock.  When DHDTHREAD builds run in dedicated
 * thread context (threads_only set) a sleeping semaphore is used;
 * otherwise a BH-safe spinlock protects against softirq access.
 */
void
dhd_os_sdlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

#ifdef DHDTHREAD
	if (dhd->threads_only)
		down(&dhd->sdsem);
	else
#endif /* DHDTHREAD */
	spin_lock_bh(&dhd->sdlock);
}
5141 
/* Release the SDIO bus lock taken by dhd_os_sdlock(); must mirror its
 * semaphore-vs-spinlock choice exactly.
 */
void
dhd_os_sdunlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

#ifdef DHDTHREAD
	if (dhd->threads_only)
		up(&dhd->sdsem);
	else
#endif /* DHDTHREAD */
	spin_unlock_bh(&dhd->sdlock);
}
5156 
5157 void
5158 dhd_os_sdlock_txq(dhd_pub_t *pub)
5159 {
5160 	dhd_info_t *dhd;
5161 
5162 	dhd = (dhd_info_t *)(pub->info);
5163 	spin_lock_bh(&dhd->txqlock);
5164 }
5165 
5166 void
5167 dhd_os_sdunlock_txq(dhd_pub_t *pub)
5168 {
5169 	dhd_info_t *dhd;
5170 
5171 	dhd = (dhd_info_t *)(pub->info);
5172 	spin_unlock_bh(&dhd->txqlock);
5173 }
5174 
/* No-op: no dedicated rx-queue lock is taken in this implementation. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
5179 
/* No-op counterpart of dhd_os_sdlock_rxq(). */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
5184 
/* The tx path reuses the main SDIO bus lock. */
void
dhd_os_sdtxlock(dhd_pub_t *pub)
{
	dhd_os_sdlock(pub);
}
5190 
/* Release the SDIO bus lock taken via dhd_os_sdtxlock(). */
void
dhd_os_sdtxunlock(dhd_pub_t *pub)
{
	dhd_os_sdunlock(pub);
}
5196 
5197 #if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
5198 static void
5199 dhd_os_rxflock(dhd_pub_t *pub)
5200 {
5201 	dhd_info_t *dhd;
5202 
5203 	dhd = (dhd_info_t *)(pub->info);
5204 	spin_lock_bh(&dhd->rxf_lock);
5205 
5206 }
5207 
5208 static void
5209 dhd_os_rxfunlock(dhd_pub_t *pub)
5210 {
5211 	dhd_info_t *dhd;
5212 
5213 	dhd = (dhd_info_t *)(pub->info);
5214 	spin_unlock_bh(&dhd->rxf_lock);
5215 }
5216 #endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
5217 
5218 #ifdef DHDTCPACK_SUPPRESS
5219 void
5220 dhd_os_tcpacklock(dhd_pub_t *pub)
5221 {
5222 	dhd_info_t *dhd;
5223 
5224 	dhd = (dhd_info_t *)(pub->info);
5225 	spin_lock_bh(&dhd->tcpack_lock);
5226 
5227 }
5228 
5229 void
5230 dhd_os_tcpackunlock(dhd_pub_t *pub)
5231 {
5232 	dhd_info_t *dhd;
5233 
5234 	dhd = (dhd_info_t *)(pub->info);
5235 	spin_unlock_bh(&dhd->tcpack_lock);
5236 }
5237 #endif /* DHDTCPACK_SUPPRESS */
5238 
5239 #if defined(CONFIG_DHD_USE_STATIC_BUF)
/* Hand out the requested section from the platform's preallocated
 * WiFi memory pool (wl_android_prealloc).  'osh' is unused here.
 */
uint8* dhd_os_prealloc(void *osh, int section, uint size)
{
	return (uint8*)wl_android_prealloc(section, size);
}
5244 
/* No-op: preallocated buffers are not returned to the pool here. */
void dhd_os_prefree(void *osh, void *addr, uint size)
{
}
#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
5249 
5250 #if defined(WL_WIRELESS_EXT)
5251 struct iw_statistics *
5252 dhd_get_wireless_stats(struct net_device *dev)
5253 {
5254 	int res = 0;
5255 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5256 
5257 	if (!dhd->pub.up) {
5258 		return NULL;
5259 	}
5260 
5261 	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
5262 
5263 	if (res == 0)
5264 		return &dhd->iw.wstats;
5265 	else
5266 		return NULL;
5267 }
5268 #endif /* defined(WL_WIRELESS_EXT) */
5269 
/* Decode a firmware event via wl_host_event() and fan it out to the
 * wireless-extensions and/or cfg80211 listeners, as compiled in.
 * Returns BCME_OK on success or the decode error code.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
	ASSERT(dhd != NULL);

	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);

		if (dhd->iflist[*ifidx]->net) {
		wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT)  */

#ifdef WL_CFG80211
	if ((ntoh32(event->event_type) == WLC_E_IF) &&
		(((dhd_if_event_t *)*data)->action == WLC_E_IF_ADD))
		/* If ADD_IF has been called directly by wl utility then we
		 * should not report this. In case if ADD_IF was called from
		 * CFG stack, then too this event need not be reported back
		 */
		return (BCME_OK);
	if ((wl_cfg80211_is_progress_ifchange() ||
		wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) {
		/*
		 * If IF_ADD/CHANGE operation is going on,
		 *  discard any event received on the virtual I/F
		 */
		return (BCME_OK);
	}

	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->event2cfg80211 && dhd->iflist[*ifidx]->net) {
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
	}
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
5322 
/* Send up a locally generated event.  Currently a stub: no event type is
 * handled; the switch exists as a hook for future local events.
 */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	switch (ntoh32(event->event_type)) {
	default:
		break;
	}
}
5332 
/* Sleep (up to IOCTL_RESP_TIMEOUT ms) until *lockvar is cleared by
 * dhd_wait_event_wakeup().  The SD lock is dropped while waiting so the
 * event path can make progress, then re-acquired before returning.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif
	return;
}
5350 
5351 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
5352 {
5353 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
5354 	struct dhd_info *dhdinfo =  dhd->info;
5355 	if (waitqueue_active(&dhdinfo->ctrl_wait))
5356 		wake_up(&dhdinfo->ctrl_wait);
5357 #endif
5358 	return;
5359 }
5360 
/* Reset the dongle via the bus layer.  flag == TRUE powers down: a
 * WLC_DOWN is issued first (best effort) and wlfc/PNO state is torn
 * down; flag == FALSE powers back up.  Returns the dhd_bus_devreset()
 * result (0 on success).
 */
int
dhd_dev_reset(struct net_device *dev, uint8 flag)
{
	int ret;

	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	if (flag == TRUE) {
		/* Issue wl down command before resetting the chip */
		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
		}
#if defined(PROP_TXSTATUS) && !defined(PROP_TXSTATUS_VSDB)
	dhd_wlfc_deinit(&dhd->pub);
	if (dhd->pub.plat_deinit)
		dhd->pub.plat_deinit((void *)&dhd->pub);
#endif /* PROP_TXSTATUS && !PROP_TXSTATUS_VSDB */
#ifdef PNO_SUPPORT
	if (dhd->pub.pno_state)
		dhd_pno_deinit(&dhd->pub);
#endif
	}

	ret = dhd_bus_devreset(&dhd->pub, flag);
	if (ret) {
		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
		return ret;
	}

	return ret;
}
5392 
5393 int net_os_set_suspend_disable(struct net_device *dev, int val)
5394 {
5395 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5396 	int ret = 0;
5397 
5398 	if (dhd) {
5399 		ret = dhd->pub.suspend_disable_flag;
5400 		dhd->pub.suspend_disable_flag = val;
5401 	}
5402 	return ret;
5403 }
5404 
/* Enter/leave the suspend power profile for this device.  With
 * early-suspend builds the request goes directly to dhd_set_suspend();
 * otherwise through the helper (which also honors 'force').  With
 * cfg80211 the power mode shown to userspace is refreshed afterwards.
 * Returns the helper's result, or 0 without a driver context.
 */
int net_os_set_suspend(struct net_device *dev, int val, int force)
{
	int ret = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	if (dhd) {
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
		ret = dhd_set_suspend(val, &dhd->pub);
#else
		ret = dhd_suspend_resume_helper(dhd, val, force);
#endif
#ifdef WL_CFG80211
		wl_cfg80211_update_power_mode(dev);
#endif
	}
	return ret;
}
5422 
5423 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
5424 {
5425 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5426 
5427 	if (dhd)
5428 		dhd->pub.suspend_bcn_li_dtim = val;
5429 
5430 	return 0;
5431 }
5432 
5433 #ifdef PKT_FILTER_SUPPORT
/* Install (add_remove != 0) or clear one of the fixed broadcast/multicast
 * packet-filter slots.  The unicast and mDNS slots are deliberately left
 * untouched.  Returns 0 on success, -EINVAL for an unknown slot.
 * NOTE(review): 'filter_id' is computed but unused because the firmware
 * delete call below is commented out — confirm whether removal should
 * also be pushed to the firmware.
 */
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	char *filterp = NULL;
	int filter_id = 0;
	int ret = 0;

	if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
	    (num == DHD_MDNS_FILTER_NUM))
		return ret;
	if (num >= dhd->pub.pktfilter_count)
		return -EINVAL;
	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			filter_id = 102;
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filterp = "103 0 0 0 0xFFFF 0x3333";
			filter_id = 103;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhd->pub.pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
	} else { /* Delete filter */
		dhd->pub.pktfilter[num] = NULL;
		/* dhd_pktfilter_offload_delete(&dhd->pub, filter_id); */
	}
	return ret;
}
5473 
5474 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
5475 {
5476 	int ret = 0;
5477 
5478 	/* Packet filtering is set only if we still in early-suspend and
5479 	 * we need either to turn it ON or turn it OFF
5480 	 * We can always turn it OFF in case of early-suspend, but we turn it
5481 	 * back ON only if suspend_disable_flag was not set
5482 	*/
5483 	if (dhdp && dhdp->up) {
5484 		if (dhdp->in_suspend) {
5485 			if (!val || (val && !dhdp->suspend_disable_flag))
5486 				dhd_enable_packet_filter(val, dhdp);
5487 		}
5488 	}
5489 	return ret;
5490 }
5491 
5492 /* function to enable/disable packet for Network device */
5493 int net_os_enable_packet_filter(struct net_device *dev, int val)
5494 {
5495 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5496 
5497 	return dhd_os_enable_packet_filter(&dhd->pub, val);
5498 }
5499 #endif /* PKT_FILTER_SUPPORT */
5500 
5501 int
5502 dhd_dev_init_ioctl(struct net_device *dev)
5503 {
5504 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5505 	int ret;
5506 
5507 	dhd_process_cid_mac(&dhd->pub, TRUE);
5508 
5509 	if ((ret = dhd_preinit_ioctls(&dhd->pub)) < 0)
5510 		goto done;
5511 
5512 	dhd_process_cid_mac(&dhd->pub, FALSE);
5513 
5514 done:
5515 	return ret;
5516 }
5517 
5518 #ifdef PNO_SUPPORT
5519 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
5520 int
5521 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
5522 {
5523 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5524 
5525 	return (dhd_pno_stop_for_ssid(&dhd->pub));
5526 }
5527 
5528 /* Linux wrapper to call common dhd_pno_set_for_ssid */
5529 int
5530 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
5531 	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
5532 {
5533 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5534 
5535 	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
5536 		pno_repeat, pno_freq_expo_max, channel_list, nchan));
5537 }
5538 
5539 /* Linux wrapper to call common dhd_pno_enable */
5540 int
5541 dhd_dev_pno_enable(struct net_device *dev, int enable)
5542 {
5543 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5544 
5545 	return (dhd_pno_enable(&dhd->pub, enable));
5546 }
5547 
5548 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
5549 int
5550 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
5551 	struct dhd_pno_hotlist_params *hotlist_params)
5552 {
5553 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5554 	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
5555 }
5556 
5557 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
5558 int
5559 dhd_dev_pno_stop_for_batch(struct net_device *dev)
5560 {
5561 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5562 	return (dhd_pno_stop_for_batch(&dhd->pub));
5563 }
5564 
5565 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
5566 int
5567 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
5568 {
5569 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5570 	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
5571 }
5572 
5573 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
5574 int
5575 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
5576 {
5577 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5578 	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
5579 }
5580 #endif /* PNO_SUPPORT */
5581 
5582 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
/* Deferred-work handler for a firmware hang: close the primary net
 * device and notify userspace (WEXT "HANG" private event and/or a
 * cfg80211 hang indication, as compiled in).
 */
static void dhd_hang_process(struct work_struct *work)
{
	dhd_info_t *dhd;
	struct net_device *dev;

	dhd = (dhd_info_t *)container_of(work, dhd_info_t, work_hang);
	dev = dhd->iflist[0]->net;

	if (dev) {
		/* dev_close() must be called under the RTNL lock */
		rtnl_lock();
		dev_close(dev);
		rtnl_unlock();
#if defined(WL_WIRELESS_EXT)
		wl_iw_send_priv_event(dev, "HANG");
#endif
#if defined(WL_CFG80211)
		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
	}
}
5603 
5604 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
5605 {
5606 	int ret = 0;
5607 	if (dhdp) {
5608 		if (!dhdp->hang_was_sent) {
5609 			dhdp->hang_was_sent = 1;
5610 			schedule_work(&dhdp->info->work_hang);
5611 		}
5612 	}
5613 	return ret;
5614 }
5615 
/* Userspace-visible hang notification.  When hang_report is enabled the
 * hang is propagated (via the work item, or a direct cfg80211 hang on
 * older kernels); otherwise it is only logged and the bus is forced
 * down to stop any further traffic.
 */
int net_os_send_hang_message(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	int ret = 0;

	if (dhd) {
		/* Report FW problem when enabled */
		if (dhd->pub.hang_report) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
			ret = dhd_os_send_hang_message(&dhd->pub);
#else
			ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
		} else {
			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
				__FUNCTION__));
			/* Enforce bus down to stop any future traffic */
			dhd->pub.busstate = DHD_BUS_DOWN;
		}
	}
	return ret;
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
5639 
/* Cache the new country spec in the dhd context and, with cfg80211,
 * refresh the wiphy band information.  No-op while the device is down.
 */
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, notify);
#endif
	}
}
5650 
/* React to a band change by refreshing the wiphy bands (cfg80211 only).
 * NOTE(review): the 'band' argument is not used here — confirm the
 * firmware-side band selection is applied elsewhere.
 */
void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, true);
#endif
	}
}
5660 
5661 void dhd_net_if_lock(struct net_device *dev)
5662 {
5663 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5664 	dhd_net_if_lock_local(dhd);
5665 }
5666 
5667 void dhd_net_if_unlock(struct net_device *dev)
5668 {
5669 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5670 	dhd_net_if_unlock_local(dhd);
5671 }
5672 
5673 static void dhd_net_if_lock_local(dhd_info_t *dhd)
5674 {
5675 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5676 	if (dhd)
5677 		mutex_lock(&dhd->dhd_net_if_mutex);
5678 #endif
5679 }
5680 
5681 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
5682 {
5683 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5684 	if (dhd)
5685 		mutex_unlock(&dhd->dhd_net_if_mutex);
5686 #endif
5687 }
5688 
/* Serialize suspend/resume processing (mutexes require kernel 2.6.25+). */
static void dhd_suspend_lock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
#endif
}
5697 
/* Release the suspend/resume mutex taken by dhd_suspend_lock(). */
static void dhd_suspend_unlock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
#endif
}
5706 
5707 unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
5708 {
5709 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
5710 	unsigned long flags = 0;
5711 
5712 	if (dhd)
5713 		spin_lock_irqsave(&dhd->dhd_lock, flags);
5714 
5715 	return flags;
5716 }
5717 
5718 void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
5719 {
5720 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
5721 
5722 	if (dhd)
5723 		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
5724 }
5725 
/* Atomic snapshot of the number of 802.1x frames still in flight. */
static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
	return (atomic_read(&dhd->pend_8021x_cnt));
}
5731 
/* Maximum number of 10 ms polls to wait for pending 802.1x frames */
#define MAX_WAIT_FOR_8021X_TX	50

/* Wait until all pending 802.1x (EAPOL) tx frames have completed,
 * polling every 10 ms up to MAX_WAIT_FOR_8021X_TX times.  On timeout the
 * counter is forcibly reset to zero.  Returns the pending count observed
 * by the final poll.
 */
int
dhd_wait_pend8021x(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	int timeout = msecs_to_jiffies(10);
	int ntimes = MAX_WAIT_FOR_8021X_TX;
	int pend = dhd_get_pend_8021x_cnt(dhd);

	while (ntimes && pend) {
		if (pend) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(timeout);
			set_current_state(TASK_RUNNING);
			ntimes--;
		}
		pend = dhd_get_pend_8021x_cnt(dhd);
	}
	if (ntimes == 0)
	{
		atomic_set(&dhd->pend_8021x_cnt, 0);
		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
	}
	return pend;
}
5758 
5759 #ifdef DHD_DEBUG
5760 int
5761 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
5762 {
5763 	int ret = 0;
5764 	struct file *fp;
5765 	mm_segment_t old_fs;
5766 	loff_t pos = 0;
5767 
5768 	/* change to KERNEL_DS address limit */
5769 	old_fs = get_fs();
5770 	set_fs(KERNEL_DS);
5771 
5772 	/* open file to write */
5773 	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
5774 	if (!fp) {
5775 		printf("%s: open file error\n", __FUNCTION__);
5776 		ret = -1;
5777 		goto exit;
5778 	}
5779 
5780 	/* Write buf to file */
5781 	fp->f_op->write(fp, buf, size, &pos);
5782 
5783 exit:
5784 	/* free buf before return */
5785 	MFREE(dhd->osh, buf, size);
5786 	/* close file before return */
5787 	if (fp)
5788 		filp_close(fp, current->files);
5789 	/* restore previous address limit */
5790 	set_fs(old_fs);
5791 
5792 	return ret;
5793 }
5794 #endif /* DHD_DEBUG */
5795 
/* Convert any pending rx/ctrl wake-lock timeout requests into actual
 * timed wakelocks, then clear the pending values.  Returns the larger of
 * the two pending timeouts (ms) that were outstanding, 0 if none.
 */
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#ifdef CONFIG_HAS_WAKELOCK
		if (dhd->wakelock_rx_timeout_enable)
			wake_lock_timeout(&dhd->wl_rxwake,
				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
		if (dhd->wakelock_ctrl_timeout_enable)
			wake_lock_timeout(&dhd->wl_ctrlwake,
				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
5820 
5821 int net_os_wake_lock_timeout(struct net_device *dev)
5822 {
5823 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5824 	int ret = 0;
5825 
5826 	if (dhd)
5827 		ret = dhd_os_wake_lock_timeout(&dhd->pub);
5828 	return ret;
5829 }
5830 
5831 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
5832 {
5833 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
5834 	unsigned long flags;
5835 
5836 	if (dhd) {
5837 		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
5838 		if (val > dhd->wakelock_rx_timeout_enable)
5839 			dhd->wakelock_rx_timeout_enable = val;
5840 		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
5841 	}
5842 	return 0;
5843 }
5844 
5845 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
5846 {
5847 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
5848 	unsigned long flags;
5849 
5850 	if (dhd) {
5851 		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
5852 		if (val > dhd->wakelock_ctrl_timeout_enable)
5853 			dhd->wakelock_ctrl_timeout_enable = val;
5854 		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
5855 	}
5856 	return 0;
5857 }
5858 
/* Cancel any pending ctrl wake-lock timeout request and release the
 * ctrl wakelock if it is currently held.  Returns 0.
 */
int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
		if (wake_lock_active(&dhd->wl_ctrlwake))
			wake_unlock(&dhd->wl_ctrlwake);
#endif
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}
5875 
5876 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
5877 {
5878 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5879 	int ret = 0;
5880 
5881 	if (dhd)
5882 		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
5883 	return ret;
5884 }
5885 
5886 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
5887 {
5888 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5889 	int ret = 0;
5890 
5891 	if (dhd)
5892 		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
5893 	return ret;
5894 }
5895 
/* Take (or nest) the main driver wakelock.  Only the first acquisition
 * grabs the underlying wake_lock / pm_stay_awake; later calls just bump
 * the counter.  Returns the new nesting count, or 0 without a context.
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		if (!dhd->wakelock_counter)
			wake_lock(&dhd->wl_wifi);
#elif 1 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		if (pm_dev)
			pm_stay_awake(pm_dev);
#endif
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
5917 
5918 int net_os_wake_lock(struct net_device *dev)
5919 {
5920 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5921 	int ret = 0;
5922 
5923 	if (dhd)
5924 		ret = dhd_os_wake_lock(&dhd->pub);
5925 	return ret;
5926 }
5927 
/* Release one nesting level of the main driver wakelock; the underlying
 * lock is dropped when the counter hits zero.  Pending timeout requests
 * are flushed first via dhd_os_wake_lock_timeout().  Returns the
 * remaining nesting count.
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_counter) {
			dhd->wakelock_counter--;
#ifdef CONFIG_HAS_WAKELOCK
			if (!dhd->wakelock_counter)
				wake_unlock(&dhd->wl_wifi);
#elif 1 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			if (pm_dev)
				pm_relax(pm_dev);
#endif
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
5952 
/* Report whether any DHD wakelock is currently held, so the SD host can
 * veto suspend.  Returns 1 when a lock is active, 0 otherwise.
 */
int dhd_os_check_wakelock(void *dhdp)
{
#if defined(CONFIG_HAS_WAKELOCK) || (1 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, \
	36)))
	dhd_pub_t *pub = (dhd_pub_t *)dhdp;
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif 1 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && pm_dev)
		return 1;
#endif
	return 0;
}
5976 
5977 int net_os_wake_unlock(struct net_device *dev)
5978 {
5979 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
5980 	int ret = 0;
5981 
5982 	if (dhd)
5983 		ret = dhd_os_wake_unlock(&dhd->pub);
5984 	return ret;
5985 }
5986 
/* Take (or nest) the watchdog wakelock; only the first call grabs the
 * underlying wake_lock.  Returns the new nesting count.
 */
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used : lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
6006 
/* Drop the watchdog wakelock entirely: the nesting counter is reset to
 * zero (not decremented) and the underlying lock is released.  Returns 0.
 */
int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
6025 
6026 int dhd_os_check_if_up(void *dhdp)
6027 {
6028 	dhd_pub_t *pub = (dhd_pub_t *)dhdp;
6029 
6030 	if (!pub)
6031 		return 0;
6032 	return pub->up;
6033 }
6034 
6035 /* function to collect firmware, chip id and chip version info */
6036 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
6037 {
6038 	int i;
6039 
6040 	i = snprintf(info_string, sizeof(info_string),
6041 		"  Driver: %s\n  Firmware: %s ", EPI_VERSION_STR, fw);
6042 
6043 	if (!dhdp)
6044 		return;
6045 
6046 	i = snprintf(&info_string[i], sizeof(info_string) - i,
6047 		"\n  Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
6048 		dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
6049 }
6050 
/* Issue a wl ioctl on behalf of a net device, holding the OS wakelock
 * for the duration and running the hang check on the result.
 * Returns the ioctl status, or a negative errno for bad arguments.
 */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !netdev_priv(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = *(dhd_info_t **)netdev_priv(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	dhd_check_hang(net, &dhd->pub, ret);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}
6079 
6080 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
6081 {
6082 	struct net_device *net;
6083 
6084 	net = dhd_idx2net(dhdp, ifidx);
6085 	return dhd_check_hang(net, dhdp, ret);
6086 }
6087 
6088 
6089 #ifdef PROP_TXSTATUS
6090 extern int dhd_wlfc_interface_entry_update(void* state,	ewlfc_mac_entry_action_t action, uint8 ifid,
6091 	uint8 iftype, uint8* ea);
6092 extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits);
6093 
6094 int dhd_wlfc_interface_event(struct dhd_info *dhd,
6095 	ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
6096 {
6097 	if (dhd->pub.wlfc_state == NULL)
6098 		return BCME_OK;
6099 
6100 	return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea);
6101 }
6102 
6103 int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data)
6104 {
6105 	if (dhd->pub.wlfc_state == NULL)
6106 		return BCME_OK;
6107 
6108 	return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data);
6109 }
6110 
/* Handle a wlfc event by (re)enabling proptx flow control. */
int dhd_wlfc_event(struct dhd_info *dhd)
{
	return dhd_wlfc_enable(&dhd->pub);
}
6115 
/* Platform hook: no platform-specific wlfc enable work needed here. */
void dhd_wlfc_plat_enable(void *dhd)
{
	return;
}
6120 
/* Platform hook: no platform-specific wlfc deinit work needed here. */
void dhd_wlfc_plat_deinit(void *dhd)
{
	return;
}
6125 
/* Decide whether host flow control can be skipped.  With cfg80211 it is
 * kept (returns FALSE) only while VSDB mode is active; in all other
 * configurations flow control is skipped.
 */
bool dhd_wlfc_skip_fc(void)
{
#ifdef WL_CFG80211
	extern struct wl_priv *wlcfg_drv_priv;

	/* enable flow control in vsdb mode */
	return !(wlcfg_drv_priv && wlcfg_drv_priv->vsdb_mode);
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */
}
6137 #endif /* PROP_TXSTATUS */
6138 
6139 #ifdef BCMDBGFS
6140 
6141 #include <linux/debugfs.h>
6142 
6143 extern uint32 dhd_readregl(void *bp, uint32 addr);
6144 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
6145 
/* State for the "dhd" debugfs directory and its register-access node. */
typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;
	struct dentry	*debugfs_mem;	/* the "mem" peek/poke file */
	dhd_pub_t 	*dhdp;
	uint32 		size;		/* size of the exposed address window */
} dhd_dbgfs_t;

dhd_dbgfs_t g_dbgfs;
6154 
/* debugfs open: expose the inode's private data to the read/write handlers. */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
6161 
/* debugfs read handler: fetches one backplane register (4 bytes, from
 * the 4-byte-aligned current file position) via dhd_readregl() and
 * copies it to userspace.
 * NOTE(review): 'count' may exceed 4 yet up to 'count' bytes are
 * reported as read while only 4 were fetched, and the partial-failure
 * handling of copy_to_user() (ret == count) looks suspect — confirm
 * against the intended debugfs ABI before relying on multi-word reads.
 */
static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
                       size_t count, loff_t *ppos)
{
	ssize_t rval;
	uint32 tmp;
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

	ret = copy_to_user(ubuf, &tmp, 4);
	if (ret == count)
		return -EFAULT;

	count -= ret;
	*ppos = pos + count;
	rval = count;

	return rval;
}
6191 
6192 
6193 static ssize_t
6194 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
6195 {
6196 	loff_t pos = *ppos;
6197 	size_t ret;
6198 	uint32 buf;
6199 
6200 	if (pos < 0)
6201 		return -EINVAL;
6202 	if (pos >= g_dbgfs.size || !count)
6203 		return 0;
6204 	if (count > g_dbgfs.size - pos)
6205 		count = g_dbgfs.size - pos;
6206 
6207 	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
6208 	if (ret == count)
6209 		return -EFAULT;
6210 
6211 	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
6212 	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
6213 
6214 	return count;
6215 }
6216 
6217 
6218 loff_t
6219 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
6220 {
6221 	loff_t pos = -1;
6222 
6223 	switch (whence) {
6224 		case 0:
6225 			pos = off;
6226 			break;
6227 		case 1:
6228 			pos = file->f_pos + off;
6229 			break;
6230 		case 2:
6231 			pos = g_dbgfs.size - off;
6232 	}
6233 	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
6234 }
6235 
/* File operations for the debugfs "mem" node: 4-byte register peek/poke. */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write	= dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek	= dhd_debugfs_lseek
};
6242 
6243 static void dhd_dbg_create(void)
6244 {
6245 	if (g_dbgfs.debugfs_dir) {
6246 		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
6247 			NULL, &dhd_dbg_state_ops);
6248 	}
6249 }
6250 
6251 void dhd_dbg_init(dhd_pub_t *dhdp)
6252 {
6253 	int err;
6254 
6255 	g_dbgfs.dhdp = dhdp;
6256 	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
6257 
6258 	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
6259 	if (IS_ERR(g_dbgfs.debugfs_dir)) {
6260 		err = PTR_ERR(g_dbgfs.debugfs_dir);
6261 		g_dbgfs.debugfs_dir = NULL;
6262 		return;
6263 	}
6264 
6265 	dhd_dbg_create();
6266 
6267 	return;
6268 }
6269 
6270 void dhd_dbg_remove(void)
6271 {
6272 	debugfs_remove(g_dbgfs.debugfs_mem);
6273 	debugfs_remove(g_dbgfs.debugfs_dir);
6274 
6275 	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
6276 
6277 }
6278 #endif /* ifdef BCMDBGFS */
6279 
6280 #ifdef WLMEDIA_HTSF
6281 
/*
 * Stamp an outgoing packet with the host-extrapolated TSF time.
 * Packets are selected by a destination port read at byte offset 40
 * (assumes an IPv4/UDP iperf-style frame layout -- NOTE(review): the
 * hard-coded offsets 40/44/82/84 are not derived from parsed headers;
 * confirm against the test traffic format). A full htsfts_t record is
 * appended at HTSF_HOSTOFFSET for the receive side to complete.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/*  timestamp packet  */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
/*		memcpy(&proto, p1+26, 4);  	*/
		memcpy(&dport, p1+40, 2);
/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
		dport = ntoh16(dport);
	}

	/* timestamp only if  icmp or udb iperf with port 5555 */
/*	if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		/* build the host timestamp record carried in the payload */
		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic  = HTSFMAGIC;
		ts.prio   = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10    = get_cycles();
		ts.t10    = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
6325 
6326 static void dhd_dump_htsfhisto(histo_t *his, char *s)
6327 {
6328 	int pktcnt = 0, curval = 0, i;
6329 	for (i = 0; i < (NUMBIN-2); i++) {
6330 		curval += 500;
6331 		printf("%d ",  his->bin[i]);
6332 		pktcnt += his->bin[i];
6333 	}
6334 	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
6335 		his->bin[NUMBIN-1], s);
6336 }
6337 
6338 static
6339 void sorttobin(int value, histo_t *histo)
6340 {
6341 	int i, binval = 0;
6342 
6343 	if (value < 0) {
6344 		histo->bin[NUMBIN-1]++;
6345 		return;
6346 	}
6347 	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
6348 		histo->bin[NUMBIN-2] = value;
6349 
6350 	for (i = 0; i < (NUMBIN-2); i++) {
6351 		binval += 500; /* 500m s bins */
6352 		if (value <= binval) {
6353 			histo->bin[i]++;
6354 			return;
6355 		}
6356 	}
6357 	histo->bin[NUMBIN-3]++;
6358 }
6359 
6360 static
6361 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
6362 {
6363 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6364 	struct sk_buff *skb;
6365 	char *p1;
6366 	uint16 old_magic;
6367 	int d1, d2, d3, end2end;
6368 	htsfts_t *htsf_ts;
6369 	uint32 htsf;
6370 
6371 	skb = PKTTONATIVE(dhdp->osh, pktbuf);
6372 	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
6373 
6374 	if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
6375 		memcpy(&old_magic, p1+78, 2);
6376 		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
6377 	}
6378 	else
6379 		return;
6380 
6381 	if (htsf_ts->magic == HTSFMAGIC) {
6382 		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
6383 		htsf_ts->cE0 = get_cycles();
6384 	}
6385 
6386 	if (old_magic == 0xACAC) {
6387 
6388 		tspktcnt++;
6389 		htsf = dhd_get_htsf(dhd, 0);
6390 		memcpy(skb->data+92, &htsf, sizeof(uint32));
6391 
6392 		memcpy(&ts[tsidx].t1, skb->data+80, 16);
6393 
6394 		d1 = ts[tsidx].t2 - ts[tsidx].t1;
6395 		d2 = ts[tsidx].t3 - ts[tsidx].t2;
6396 		d3 = ts[tsidx].t4 - ts[tsidx].t3;
6397 		end2end = ts[tsidx].t4 - ts[tsidx].t1;
6398 
6399 		sorttobin(d1, &vi_d1);
6400 		sorttobin(d2, &vi_d2);
6401 		sorttobin(d3, &vi_d3);
6402 		sorttobin(end2end, &vi_d4);
6403 
6404 		if (end2end > 0 && end2end >  maxdelay) {
6405 			maxdelay = end2end;
6406 			maxdelaypktno = tspktcnt;
6407 			memcpy(&maxdelayts, &ts[tsidx], 16);
6408 		}
6409 		if (++tsidx >= TSMAX)
6410 			tsidx = 0;
6411 	}
6412 }
6413 
/*
 * Extrapolate the current dongle TSF from the host cycle counter using
 * the fixed-point coefficient learned in htsf_update().
 * Returns 0 (after logging) if no coefficient has been learned yet.
 * NOTE(review): 'ifidx' is accepted but unused by this implementation.
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32    factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	/* cycles elapsed since the last update, handling 32-bit wrap */
	if (cur_cycle >  dhd->htsf.last_cycle)
		delta = cur_cycle -  dhd->htsf.last_cycle;
	else {
		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
	        factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval  = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		/* interpolate using the coefficient's second decimal digit */
		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	}
	else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
6445 
6446 static void dhd_dump_latency(void)
6447 {
6448 	int i, max = 0;
6449 	int d1, d2, d3, d4, d5;
6450 
6451 	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
6452 	for (i = 0; i < TSMAX; i++) {
6453 		d1 = ts[i].t2 - ts[i].t1;
6454 		d2 = ts[i].t3 - ts[i].t2;
6455 		d3 = ts[i].t4 - ts[i].t3;
6456 		d4 = ts[i].t4 - ts[i].t1;
6457 		d5 = ts[max].t4-ts[max].t1;
6458 		if (d4 > d5 && d4 > 0)  {
6459 			max = i;
6460 		}
6461 		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
6462 			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
6463 			d1, d2, d3, d4, i);
6464 	}
6465 
6466 	printf("current idx = %d \n", tsidx);
6467 
6468 	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
6469 	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
6470 	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
6471 	maxdelayts.t2 - maxdelayts.t1,
6472 	maxdelayts.t3 - maxdelayts.t2,
6473 	maxdelayts.t4 - maxdelayts.t3,
6474 	maxdelayts.t4 - maxdelayts.t1);
6475 }
6476 
6477 
6478 static int
6479 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
6480 {
6481 	wl_ioctl_t ioc;
6482 	char buf[32];
6483 	int ret;
6484 	uint32 s1, s2;
6485 
6486 	struct tsf {
6487 		uint32 low;
6488 		uint32 high;
6489 	} tsf_buf;
6490 
6491 	memset(&ioc, 0, sizeof(ioc));
6492 	memset(&tsf_buf, 0, sizeof(tsf_buf));
6493 
6494 	ioc.cmd = WLC_GET_VAR;
6495 	ioc.buf = buf;
6496 	ioc.len = (uint)sizeof(buf);
6497 	ioc.set = FALSE;
6498 
6499 	strncpy(buf, "tsf", sizeof(buf) - 1);
6500 	buf[sizeof(buf) - 1] = '\0';
6501 	s1 = dhd_get_htsf(dhd, 0);
6502 	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
6503 		if (ret == -EIO) {
6504 			DHD_ERROR(("%s: tsf is not supported by device\n",
6505 				dhd_ifname(&dhd->pub, ifidx)));
6506 			return -EOPNOTSUPP;
6507 		}
6508 		return ret;
6509 	}
6510 	s2 = dhd_get_htsf(dhd, 0);
6511 
6512 	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
6513 	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
6514 		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
6515 		dhd->htsf.coefdec2, s2-tsf_buf.low);
6516 	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
6517 	return 0;
6518 }
6519 
6520 void htsf_update(dhd_info_t *dhd, void *data)
6521 {
6522 	static ulong  cur_cycle = 0, prev_cycle = 0;
6523 	uint32 htsf, tsf_delta = 0;
6524 	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
6525 	ulong b, a;
6526 	cycles_t t;
6527 
6528 	/* cycles_t in inlcude/mips/timex.h */
6529 
6530 	t = get_cycles();
6531 
6532 	prev_cycle = cur_cycle;
6533 	cur_cycle = t;
6534 
6535 	if (cur_cycle > prev_cycle)
6536 		cyc_delta = cur_cycle - prev_cycle;
6537 	else {
6538 		b = cur_cycle;
6539 		a = prev_cycle;
6540 		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
6541 	}
6542 
6543 	if (data == NULL)
6544 		printf(" tsf update ata point er is null \n");
6545 
6546 	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
6547 	memcpy(&cur_tsf, data, sizeof(tsf_t));
6548 
6549 	if (cur_tsf.low == 0) {
6550 		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
6551 		return;
6552 	}
6553 
6554 	if (cur_tsf.low > prev_tsf.low)
6555 		tsf_delta = (cur_tsf.low - prev_tsf.low);
6556 	else {
6557 		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
6558 		 cur_tsf.low, prev_tsf.low));
6559 		if (cur_tsf.high > prev_tsf.high) {
6560 			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
6561 			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
6562 		}
6563 		else
6564 			return; /* do not update */
6565 	}
6566 
6567 	if (tsf_delta)  {
6568 		hfactor = cyc_delta / tsf_delta;
6569 		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
6570 		dec1 =  tmp/tsf_delta;
6571 		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
6572 		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
6573 		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
6574 
6575 		if (dec3 > 4) {
6576 			if (dec2 == 9) {
6577 				dec2 = 0;
6578 				if (dec1 == 9) {
6579 					dec1 = 0;
6580 					hfactor++;
6581 				}
6582 				else {
6583 					dec1++;
6584 				}
6585 			}
6586 			else
6587 				dec2++;
6588 		}
6589 	}
6590 
6591 	if (hfactor) {
6592 		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
6593 		dhd->htsf.coef = hfactor;
6594 		dhd->htsf.last_cycle = cur_cycle;
6595 		dhd->htsf.last_tsf = cur_tsf.low;
6596 		dhd->htsf.coefdec1 = dec1;
6597 		dhd->htsf.coefdec2 = dec2;
6598 	}
6599 	else {
6600 		htsf = prev_tsf.low;
6601 	}
6602 }
6603 
6604 #endif /* WLMEDIA_HTSF */
6605